diff --git a/.gitignore b/.gitignore index 1b37ea61..64cc9d45 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,9 @@ kubeconfig_* .local dist providers + +# ignore for mock api server +mock_server.crt +mock_server.key +MockBuild +mock_api_server.log \ No newline at end of file diff --git a/GNUmakefile b/GNUmakefile index 6b6b063e..402390b8 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -45,10 +45,9 @@ generate: ##@ Test Targets .PHONY: testacc testacc: ## Run acceptance tests - TF_ACC=1 go test -v $(TESTARGS) -covermode=atomic -coverpkg=./... -coverprofile=profile.cov ./... -timeout 120m + TF_ACC=1 go test -v $(TESTARGS) -covermode=atomic -coverpkg=./... -coverprofile=profile.cov ./spectrocloud/... -timeout 120m ##@ Development Targets - DEV_PROVIDER_VERSION=100.100.100 dev-provider: ## Generate dev provider bash generate_dev_provider.sh $(DEV_PROVIDER_VERSION) diff --git a/go.mod b/go.mod index 8a1ef173..c3766d77 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.22.5 require ( github.com/go-openapi/strfmt v0.23.0 github.com/google/go-cmp v0.6.0 + github.com/gorilla/mux v1.8.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-docs v0.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 diff --git a/go.sum b/go.sum index 977c1f3c..56836701 100644 --- a/go.sum +++ b/go.sum @@ -319,6 +319,7 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= diff --git a/spectrocloud/application_create_common_test.go b/spectrocloud/application_create_common_test.go new file mode 100644 index 00000000..95225ef3 --- /dev/null +++ b/spectrocloud/application_create_common_test.go @@ -0,0 +1,124 @@ +package spectrocloud + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestToAppDeploymentClusterGroupEntity(t *testing.T) { + d := schema.TestResourceDataRaw(t, appDeploymentSchema(), map[string]interface{}{ + "name": "test-cluster-group", + "config": []interface{}{ + map[string]interface{}{ + "cluster_group_uid": "cg-uid", + "cluster_name": "cluster-name", + "limits": []interface{}{ + map[string]interface{}{ + "cpu": 4, + "memory": 2048, + "storage": 100, + }, + }, + }, + }, + "application_profile_uid": "app-profile-uid", + "labels": map[string]interface{}{ + "env": "test", + }, + }) + + entity := toAppDeploymentClusterGroupEntity(d) + + assert.NotNil(t, entity) + assert.Equal(t, "test-cluster-group", entity.Metadata.Name) + assert.Equal(t, "cg-uid", *entity.Spec.Config.TargetSpec.ClusterGroupUID) + assert.Equal(t, int32(4), entity.Spec.Config.TargetSpec.ClusterLimits.CPU) + assert.Equal(t, int32(2048), entity.Spec.Config.TargetSpec.ClusterLimits.MemoryMiB) + assert.Equal(t, int32(100), entity.Spec.Config.TargetSpec.ClusterLimits.StorageGiB) + assert.Equal(t, "cluster-name", *entity.Spec.Config.TargetSpec.ClusterName) + assert.Equal(t, "app-profile-uid", *entity.Spec.Profile.AppProfileUID) +} + +func TestToAppDeploymentVirtualClusterEntity(t *testing.T) { + d := schema.TestResourceDataRaw(t, appDeploymentSchema(), map[string]interface{}{ + "name": "test-virtual-cluster", + "config": []interface{}{ + map[string]interface{}{ + "cluster_uid": "vc-uid", + }, + }, + 
"application_profile_uid": "app-profile-uid", + "labels": map[string]interface{}{ + "env": "prod", + }, + }) + + entity := toAppDeploymentVirtualClusterEntity(d) + + assert.NotNil(t, entity) + assert.Equal(t, "test-virtual-cluster", entity.Metadata.Name) + assert.Equal(t, "vc-uid", *entity.Spec.Config.TargetSpec.ClusterUID) + assert.Equal(t, "app-profile-uid", *entity.Spec.Profile.AppProfileUID) +} + +// Helper function to return a schema.ResourceData schema for testing +func appDeploymentSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "config": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_group_uid": { + Type: schema.TypeString, + Optional: true, + }, + "cluster_name": { + Type: schema.TypeString, + Optional: true, + }, + "cluster_uid": { + Type: schema.TypeString, + Optional: true, + }, + "limits": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Optional: true, + }, + "memory": { + Type: schema.TypeInt, + Optional: true, + }, + "storage": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "application_profile_uid": { + Type: schema.TypeString, + Required: true, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + } +} diff --git a/spectrocloud/cluster_common_test.go b/spectrocloud/cluster_common_test.go index 072b060a..1bd6257d 100644 --- a/spectrocloud/cluster_common_test.go +++ b/spectrocloud/cluster_common_test.go @@ -2,11 +2,16 @@ package spectrocloud import ( "fmt" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/client" + 
"github.com/stretchr/testify/require" "reflect" "sort" "testing" + "time" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/stretchr/testify/assert" @@ -412,22 +417,6 @@ func prepareSpectroClusterModel() *models.V1SpectroCluster { return scp } -//func TestReadCommonFieldsCluster(t *testing.T) { -// d := prepareClusterVsphereTestData() -// spc := prepareSpectroClusterModel() -// c := getClientForCluster() -// _, done := readCommonFields(c, d, spc) -// assert.Equal(t, false, done) -//} - -//func TestReadCommonFieldsVirtualCluster(t *testing.T) { -// d := resourceClusterVirtual().TestResourceData() -// spc := prepareSpectroClusterModel() -// c := getClientForCluster() -// _, done := readCommonFields(c, d, spc) -// assert.Equal(t, false, done) -//} - func TestToSSHKeys(t *testing.T) { // Test case 1: When cloudConfig has "ssh_key" attribute cloudConfig1 := map[string]interface{}{ @@ -486,3 +475,1123 @@ func TestValidateReviewRepaveValue(t *testing.T) { expectedError := fmt.Sprintf("expected review_repave_state to be one of [``, `Pending`, `Approved`], got %s", invalidValue) assert.Equal(t, expectedError, errs[0].Error(), "Expected specific error message for invalid repave value") } + +func TestGeneralWarningForRepave(t *testing.T) { + var diags diag.Diagnostics + + generalWarningForRepave(&diags) + + expectedMessage := "Please note that certain day 2 operations on a running cluster may trigger a node pool repave or a full repave of your cluster. This process might temporarily affect your cluster’s performance or configuration. 
For more details, please refer to the https://docs.spectrocloud.com/clusters/cluster-management/node-pool/" + + assert.Len(t, diags, 1) + assert.Equal(t, diag.Warning, diags[0].Severity) + assert.Equal(t, "Warning", diags[0].Summary) + assert.Equal(t, expectedMessage, diags[0].Detail) +} + +func TestToClusterRBACsInputEntities(t *testing.T) { + d := resourceClusterGcp().TestResourceData() + var clusterBinding []interface{} + clusterBinding = append(clusterBinding, map[string]interface{}{ + "type": "ClusterRoleBinding", + "namespace": "default", + "role": map[string]interface{}{ + "kind": "ClusterRole", + "name": "admin", + }, + "subjects": []interface{}{ + map[string]interface{}{ + "type": "User", + "name": "admin-user", + "namespace": "default", + }, + }, + }) + clusterBinding = append(clusterBinding, map[string]interface{}{ + "type": "RoleBinding", + "namespace": "default", + "role": map[string]interface{}{ + "kind": "Role", + "name": "edit", + }, + "subjects": []interface{}{ + map[string]interface{}{ + "type": "Group", + "name": "editors", + "namespace": "default", + }, + }, + }) + err := d.Set("cluster_rbac_binding", clusterBinding) + if err != nil { + return + } + + rbacs := toClusterRBACsInputEntities(d) + + assert.Len(t, rbacs, 2) + + assert.Equal(t, "ClusterRoleBinding", rbacs[0].Spec.Bindings[0].Type) + assert.Equal(t, "ClusterRole", rbacs[0].Spec.Bindings[0].Role.Kind) + assert.Equal(t, "admin", rbacs[0].Spec.Bindings[0].Role.Name) + assert.Equal(t, "User", rbacs[0].Spec.Bindings[0].Subjects[0].Type) + assert.Equal(t, "admin-user", rbacs[0].Spec.Bindings[0].Subjects[0].Name) + + assert.Equal(t, "RoleBinding", rbacs[1].Spec.Bindings[0].Type) + assert.Equal(t, "Role", rbacs[1].Spec.Bindings[0].Role.Kind) + assert.Equal(t, "edit", rbacs[1].Spec.Bindings[0].Role.Name) + assert.Equal(t, "Group", rbacs[1].Spec.Bindings[0].Subjects[0].Type) + assert.Equal(t, "editors", rbacs[1].Spec.Bindings[0].Subjects[0].Name) +} + +func TestFlattenClusterRBAC(t *testing.T) { 
+ // Setup test data + clusterRBACs := []*models.V1ClusterRbac{ + { + Spec: &models.V1ClusterRbacSpec{ + Bindings: []*models.V1ClusterRbacBinding{ + { + Type: "ClusterRoleBinding", + Namespace: "default", + Role: &models.V1ClusterRoleRef{ + Kind: "ClusterRole", + Name: "admin", + }, + Subjects: []*models.V1ClusterRbacSubjects{ + { + Type: "User", + Name: "admin-user", + Namespace: "default", + }, + }, + }, + }, + }, + }, + { + Spec: &models.V1ClusterRbacSpec{ + Bindings: []*models.V1ClusterRbacBinding{ + { + Type: "RoleBinding", + Namespace: "kube-system", + Role: &models.V1ClusterRoleRef{ + Kind: "Role", + Name: "edit", + }, + Subjects: []*models.V1ClusterRbacSubjects{ + { + Type: "Group", + Name: "editors", + Namespace: "kube-system", + }, + }, + }, + }, + }, + }, + } + + // Execute the function under test + flattenedRBACs := flattenClusterRBAC(clusterRBACs) + + // Validate the results + assert.Len(t, flattenedRBACs, 2) + + // First RBAC entry + firstRBAC := flattenedRBACs[0].(map[string]interface{}) + assert.Equal(t, "ClusterRoleBinding", firstRBAC["type"]) + assert.Equal(t, "default", firstRBAC["namespace"]) + + firstRole := firstRBAC["role"].(map[string]interface{}) + assert.Equal(t, "ClusterRole", firstRole["kind"]) + assert.Equal(t, "admin", firstRole["name"]) + + firstSubjects := firstRBAC["subjects"].([]interface{}) + assert.Len(t, firstSubjects, 1) + firstSubject := firstSubjects[0].(map[string]interface{}) + assert.Equal(t, "User", firstSubject["type"]) + assert.Equal(t, "admin-user", firstSubject["name"]) + assert.Equal(t, "default", firstSubject["namespace"]) + + // Second RBAC entry + secondRBAC := flattenedRBACs[1].(map[string]interface{}) + assert.Equal(t, "RoleBinding", secondRBAC["type"]) + assert.Equal(t, "kube-system", secondRBAC["namespace"]) + + secondRole := secondRBAC["role"].(map[string]interface{}) + assert.Equal(t, "Role", secondRole["kind"]) + assert.Equal(t, "edit", secondRole["name"]) + + secondSubjects := 
secondRBAC["subjects"].([]interface{}) + assert.Len(t, secondSubjects, 1) + secondSubject := secondSubjects[0].(map[string]interface{}) + assert.Equal(t, "Group", secondSubject["type"]) + assert.Equal(t, "editors", secondSubject["name"]) + assert.Equal(t, "kube-system", secondSubject["namespace"]) +} + +func TestToNtpServers(t *testing.T) { + data := map[string]interface{}{ + "ntp_servers": schema.NewSet(schema.HashString, []interface{}{"0.pool.ntp1.org"}), + } + + servers := toNtpServers(data) + + expected := []string{"0.pool.ntp1.org"} + assert.Equal(t, expected, servers) +} + +func TestToClusterHostConfigs(t *testing.T) { + d := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "host_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host_endpoint_type": { + Type: schema.TypeString, + Optional: true, + }, + "ingress_host": { + Type: schema.TypeString, + Optional: true, + }, + "external_traffic_policy": { + Type: schema.TypeString, + Optional: true, + }, + "load_balancer_source_ranges": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, map[string]interface{}{ + "host_config": []interface{}{ + map[string]interface{}{ + "host_endpoint_type": "LoadBalancer", + "ingress_host": "example.com", + "external_traffic_policy": "Cluster", + "load_balancer_source_ranges": "10.0.0.0/24,192.168.1.0/24", + }, + }, + }) + + result := toClusterHostConfigs(d) + + expected := &models.V1HostClusterConfig{ + ClusterEndpoint: &models.V1HostClusterEndpoint{ + Type: "LoadBalancer", + Config: &models.V1HostClusterEndpointConfig{ + IngressConfig: &models.V1IngressConfig{ + Host: "example.com", + }, + LoadBalancerConfig: &models.V1LoadBalancerConfig{ + ExternalTrafficPolicy: "Cluster", + LoadBalancerSourceRanges: []string{"10.0.0.0/24", "192.168.1.0/24"}, + }, + }, + }, + IsHostCluster: ptr.BoolPtr(true), + } + + assert.Equal(t, expected, result) +} + +func 
TestToClusterHostConfigsNoHostConfig(t *testing.T) { + d := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "host_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host_endpoint_type": { + Type: schema.TypeString, + Optional: true, + }, + "ingress_host": { + Type: schema.TypeString, + Optional: true, + }, + "external_traffic_policy": { + Type: schema.TypeString, + Optional: true, + }, + "load_balancer_source_ranges": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, map[string]interface{}{}) + + result := toClusterHostConfigs(d) + + expected := &models.V1HostClusterConfig{ + ClusterEndpoint: nil, + IsHostCluster: ptr.BoolPtr(false), + } + + assert.Equal(t, expected, result) +} + +func TestFlattenHostConfig(t *testing.T) { + hostConfig := &models.V1HostClusterConfig{ + ClusterEndpoint: &models.V1HostClusterEndpoint{ + Type: "LoadBalancer", + Config: &models.V1HostClusterEndpointConfig{ + IngressConfig: &models.V1IngressConfig{ + Host: "example.com", + }, + LoadBalancerConfig: &models.V1LoadBalancerConfig{ + ExternalTrafficPolicy: "Cluster", + LoadBalancerSourceRanges: []string{"10.0.0.0/24", "192.168.1.0/24"}, + }, + }, + }, + } + + expected := []interface{}{ + map[string]interface{}{ + "host_endpoint_type": "LoadBalancer", + "ingress_host": "example.com", + "external_traffic_policy": "Cluster", + "load_balancer_source_ranges": "10.0.0.0/24,192.168.1.0/24", + }, + } + + result := flattenHostConfig(hostConfig) + + assert.Equal(t, expected, result) +} + +func TestFlattenHostConfigNil(t *testing.T) { + hostConfig := &models.V1HostClusterConfig{} + + expected := []interface{}{} + + result := flattenHostConfig(hostConfig) + + assert.Equal(t, expected, result) +} + +func TestFlattenSourceRanges(t *testing.T) { + hostConfig := &models.V1HostClusterConfig{ + ClusterEndpoint: &models.V1HostClusterEndpoint{ + Config: &models.V1HostClusterEndpointConfig{ + LoadBalancerConfig: 
&models.V1LoadBalancerConfig{ + LoadBalancerSourceRanges: []string{"10.0.0.0/24", "192.168.1.0/24"}, + }, + }, + }, + } + + expected := "10.0.0.0/24,192.168.1.0/24" + + result := flattenSourceRanges(hostConfig) + + assert.Equal(t, expected, result) +} + +func TestFlattenSourceRangesNil(t *testing.T) { + hostConfig := &models.V1HostClusterConfig{ + ClusterEndpoint: &models.V1HostClusterEndpoint{ + Config: &models.V1HostClusterEndpointConfig{ + LoadBalancerConfig: &models.V1LoadBalancerConfig{ + LoadBalancerSourceRanges: []string{}, + }, + }, + }, + } + + expected := "" + + result := flattenSourceRanges(hostConfig) + + assert.Equal(t, expected, result) +} + +func TestToClusterLocationConfigs(t *testing.T) { + resourceData := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "location_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "country_code": { + Type: schema.TypeString, + Optional: true, + }, + "country_name": { + Type: schema.TypeString, + Optional: true, + }, + "region_code": { + Type: schema.TypeString, + Optional: true, + }, + "region_name": { + Type: schema.TypeString, + Optional: true, + }, + "latitude": { + Type: schema.TypeFloat, + Optional: true, + }, + "longitude": { + Type: schema.TypeFloat, + Optional: true, + }, + }, + }, + }, + }, map[string]interface{}{ + "location_config": []interface{}{ + map[string]interface{}{ + "country_code": "US", + "country_name": "United States", + "region_code": "CA", + "region_name": "California", + "latitude": 37.7749, + "longitude": -122.4194, + }, + }, + }) + + expected := &models.V1ClusterLocation{ + CountryCode: "US", + CountryName: "United States", + RegionCode: "CA", + RegionName: "California", + GeoLoc: &models.V1GeolocationLatlong{ + Latitude: 37.7749, + Longitude: -122.4194, + }, + } + + result := toClusterLocationConfigs(resourceData) + + assert.Equal(t, expected, result) +} + +func TestToClusterLocationConfig(t *testing.T) { + 
config := map[string]interface{}{ + "country_code": "US", + "country_name": "United States", + "region_code": "CA", + "region_name": "California", + "latitude": 37.7749, + "longitude": -122.4194, + } + + expected := &models.V1ClusterLocation{ + CountryCode: "US", + CountryName: "United States", + RegionCode: "CA", + RegionName: "California", + GeoLoc: &models.V1GeolocationLatlong{ + Latitude: 37.7749, + Longitude: -122.4194, + }, + } + + result := toClusterLocationConfig(config) + + assert.Equal(t, expected, result) +} + +func TestToClusterGeoLoc(t *testing.T) { + config := map[string]interface{}{ + "latitude": 37.7749, + "longitude": -122.4194, + } + + expected := &models.V1GeolocationLatlong{ + Latitude: 37.7749, + Longitude: -122.4194, + } + + result := toClusterGeoLoc(config) + + assert.Equal(t, expected, result) +} + +func TestFlattenLocationConfig(t *testing.T) { + location := &models.V1ClusterLocation{ + CountryCode: "US", + CountryName: "United States", + RegionCode: "CA", + RegionName: "California", + GeoLoc: &models.V1GeolocationLatlong{ + Latitude: 37.7749, + Longitude: -122.4194, + }, + } + + expected := []interface{}{ + map[string]interface{}{ + "country_code": "US", + "country_name": "United States", + "region_code": "CA", + "region_name": "California", + "latitude": 37.7749, + "longitude": -122.4194, + }, + } + + result := flattenLocationConfig(location) + + assert.Equal(t, expected, result) +} + +func TestFlattenLocationConfigNil(t *testing.T) { + location := &models.V1ClusterLocation{} + + expected := []interface{}{ + map[string]interface{}{ + "country_code": "", + "country_name": "", + "region_code": "", + "region_name": "", + }, + } + + result := flattenLocationConfig(location) + + assert.Equal(t, expected, result) +} + +func TestGetClusterMetadata(t *testing.T) { + resourceData := resourceClusterAws().TestResourceData() + _ = resourceData.Set("name", "test-cluster") + _ = resourceData.Set("description", "A test cluster") + _ = 
resourceData.Set("tags", []string{"env:prod", "team:devops"}) + + resourceData.SetId("cluster-uid") + + expected := &models.V1ObjectMeta{ + Name: "test-cluster", + UID: "cluster-uid", + Labels: map[string]string{"env": "prod", "team": "devops"}, + Annotations: map[string]string{"description": "A test cluster"}, + } + + result := getClusterMetadata(resourceData) + + assert.Equal(t, expected, result) +} + +func TestToClusterMetadataUpdate(t *testing.T) { + resourceData := resourceClusterAws().TestResourceData() + _ = resourceData.Set("name", "test-cluster") + _ = resourceData.Set("description", "A test cluster") + _ = resourceData.Set("tags", []string{"env:prod", "team:devops"}) + + expected := &models.V1ObjectMetaInputEntity{ + Name: "test-cluster", + Labels: map[string]string{"env": "prod", "team": "devops"}, + Annotations: map[string]string{"description": "A test cluster"}, + } + + result := toClusterMetadataUpdate(resourceData) + + assert.Equal(t, expected, result) +} + +func TestToUpdateClusterMetadata(t *testing.T) { + resourceData := resourceClusterAws().TestResourceData() + _ = resourceData.Set("name", "test-cluster") + _ = resourceData.Set("description", "A test cluster") + _ = resourceData.Set("tags", []string{"env:prod", "team:devops"}) + + expected := &models.V1ObjectMetaInputEntitySchema{ + Metadata: &models.V1ObjectMetaInputEntity{ + Name: "test-cluster", + Labels: map[string]string{"env": "prod", "team": "devops"}, + Annotations: map[string]string{"description": "A test cluster"}, + }, + } + + result := toUpdateClusterMetadata(resourceData) + + assert.Equal(t, expected, result) +} + +func TestToUpdateClusterAdditionalMetadata(t *testing.T) { + resourceData := resourceClusterAws().TestResourceData() + _ = resourceData.Set("cluster_meta_attribute", "test-cluster-meta-attribute") + expected := &models.V1ClusterMetaAttributeEntity{ + ClusterMetaAttribute: "test-cluster-meta-attribute", + } + result := toUpdateClusterAdditionalMetadata(resourceData) + + 
assert.Equal(t, expected, result) +} + +func TestFlattenClusterNamespaces(t *testing.T) { + namespaces := []*models.V1ClusterNamespaceResource{ + { + Metadata: &models.V1ObjectMeta{ + Name: "namespace1", + }, + Spec: &models.V1ClusterNamespaceSpec{ + ResourceAllocation: &models.V1ClusterNamespaceResourceAllocation{ + CPUCores: 2, + MemoryMiB: 1024, + }, + }, + }, + { + Metadata: &models.V1ObjectMeta{ + Name: "namespace2", + }, + Spec: &models.V1ClusterNamespaceSpec{ + ResourceAllocation: &models.V1ClusterNamespaceResourceAllocation{ + CPUCores: 4, + MemoryMiB: 2048, + }, + }, + }, + } + + expected := []interface{}{ + map[string]interface{}{ + "name": "namespace1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "2", + "memory_MiB": "1024", + }, + }, + map[string]interface{}{ + "name": "namespace2", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "4", + "memory_MiB": "2048", + }, + }, + } + + result := flattenClusterNamespaces(namespaces) + + assert.Equal(t, expected, result) +} + +func TestToClusterNamespace(t *testing.T) { + clusterRbacBinding := map[string]interface{}{ + "name": "namespace1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "2", + "memory_MiB": "1024", + }, + } + + expected := &models.V1ClusterNamespaceResourceInputEntity{ + Metadata: &models.V1ObjectMetaUpdateEntity{ + Name: "namespace1", + }, + Spec: &models.V1ClusterNamespaceSpec{ + IsRegex: false, + ResourceAllocation: &models.V1ClusterNamespaceResourceAllocation{ + CPUCores: 2, + MemoryMiB: 1024, + }, + }, + } + + result := toClusterNamespace(clusterRbacBinding) + + assert.Equal(t, expected, result) +} + +func TestToClusterNamespaces(t *testing.T) { + resourceData := resourceClusterAws().TestResourceData() + var ns []interface{} + ns = append(ns, map[string]interface{}{ + "name": "namespace1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "2", + "memory_MiB": "1024", + }, + }) + ns = append(ns, map[string]interface{}{ + "name": 
"namespace2", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "4", + "memory_MiB": "2048", + }, + }) + _ = resourceData.Set("namespaces", ns) + expected := []*models.V1ClusterNamespaceResourceInputEntity{ + { + Metadata: &models.V1ObjectMetaUpdateEntity{ + Name: "namespace1", + }, + Spec: &models.V1ClusterNamespaceSpec{ + IsRegex: false, + ResourceAllocation: &models.V1ClusterNamespaceResourceAllocation{ + CPUCores: 2, + MemoryMiB: 1024, + }, + }, + }, + { + Metadata: &models.V1ObjectMetaUpdateEntity{ + Name: "namespace2", + }, + Spec: &models.V1ClusterNamespaceSpec{ + IsRegex: false, + ResourceAllocation: &models.V1ClusterNamespaceResourceAllocation{ + CPUCores: 4, + MemoryMiB: 2048, + }, + }, + }, + } + + result := toClusterNamespaces(resourceData) + + assert.Equal(t, expected, result) +} + +func TestGetDefaultOsPatchConfig(t *testing.T) { + expected := &models.V1MachineManagementConfig{ + OsPatchConfig: &models.V1OsPatchConfig{ + PatchOnBoot: false, + RebootIfRequired: false, + }, + } + + result := getDefaultOsPatchConfig() + + assert.Equal(t, expected, result) +} + +func TestToUpdateOsPatchEntityClusterRbac(t *testing.T) { + config := &models.V1OsPatchConfig{ + PatchOnBoot: true, + RebootIfRequired: true, + } + + expected := &models.V1OsPatchEntity{ + OsPatchConfig: config, + } + + result := toUpdateOsPatchEntityClusterRbac(config) + + assert.Equal(t, expected, result) +} + +func TestToOsPatchConfig(t *testing.T) { + resourceData := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "os_patch_on_boot": { + Type: schema.TypeBool, + Optional: true, + }, + "os_patch_schedule": { + Type: schema.TypeString, + Optional: true, + }, + "os_patch_after": { + Type: schema.TypeString, + Optional: true, + }, + }, map[string]interface{}{ + "os_patch_on_boot": true, + "os_patch_schedule": "0 0 * * *", + "os_patch_after": "2024-01-01T00:00:00.000Z", + }) + + patchTime, _ := time.Parse(time.RFC3339, "2024-01-01T00:00:00.000Z") + + expected := 
&models.V1OsPatchConfig{ + PatchOnBoot: true, + Schedule: "0 0 * * *", + OnDemandPatchAfter: models.V1Time(patchTime), + } + + result := toOsPatchConfig(resourceData) + + assert.Equal(t, expected, result) +} + +func TestValidateOsPatchSchedule(t *testing.T) { + validData := "0 0 * * *" + invalidData := "invalid cron expression" + + validResult := validateOsPatchSchedule(validData, nil) + invalidResult := validateOsPatchSchedule(invalidData, nil) + + assert.Empty(t, validResult) + assert.NotEmpty(t, invalidResult) + assert.Contains(t, invalidResult[0].Summary, "os patch schedule is invalid") +} + +func TestValidateOsPatchOnDemandAfter(t *testing.T) { + validData := time.Now().Add(20 * time.Minute).Format(time.RFC3339) + invalidData := "invalid time format" + pastData := time.Now().Add(-20 * time.Minute).Format(time.RFC3339) + + validResult := validateOsPatchOnDemandAfter(validData, nil) + invalidResult := validateOsPatchOnDemandAfter(invalidData, nil) + pastResult := validateOsPatchOnDemandAfter(pastData, nil) + + assert.Empty(t, validResult) + assert.NotEmpty(t, invalidResult) + assert.Contains(t, invalidResult[0].Summary, "time for 'os_patch_after' is invalid") + + assert.NotEmpty(t, pastResult) + assert.Contains(t, pastResult[0].Summary, "valid timestamp is timestamp which is 10 mins ahead of current timestamp") +} + +func TestToSpcApplySettings(t *testing.T) { + // Test case when "apply_setting" is set + resourceData := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "apply_setting": { + Type: schema.TypeString, + Optional: true, + }, + }, map[string]interface{}{ + "apply_setting": "reboot", + }) + + expected := &models.V1SpcApplySettings{ + ActionType: "reboot", + } + + result, err := toSpcApplySettings(resourceData) + + assert.Nil(t, err) + assert.Equal(t, expected, result) + + // Test case when "apply_setting" is not set (empty string) + resourceDataEmpty := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "apply_setting": { + Type: 
schema.TypeString, + Optional: true, + }, + }, map[string]interface{}{ + "apply_setting": "", + }) + + resultEmpty, errEmpty := toSpcApplySettings(resourceDataEmpty) + + assert.Nil(t, errEmpty) + assert.Nil(t, resultEmpty) + + // Test case when "apply_setting" is not present at all + resourceDataNil := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "apply_setting": { + Type: schema.TypeString, + Optional: true, + }, + }, map[string]interface{}{}) + + resultNil, errNil := toSpcApplySettings(resourceDataNil) + + assert.Nil(t, errNil) + assert.Nil(t, resultNil) +} + +func TestGetNodeValue(t *testing.T) { + // Test case 1: Standard input + nodeId := "node1" + action := "update" + expected := map[string]interface{}{ + "node_id": nodeId, + "action": action, + } + result := getNodeValue(nodeId, action) + assert.Equal(t, expected, result, "The returned map should match the expected map") + + // Test case 2: Different action + nodeId = "node2" + action = "reboot" + expected = map[string]interface{}{ + "node_id": nodeId, + "action": action, + } + result = getNodeValue(nodeId, action) + assert.Equal(t, expected, result, "The returned map should match the expected map") + + // Test case 3: Empty action + nodeId = "node3" + action = "" + expected = map[string]interface{}{ + "node_id": nodeId, + "action": action, + } + result = getNodeValue(nodeId, action) + assert.Equal(t, expected, result, "The returned map should match the expected map") + + // Test case 4: Empty nodeId + nodeId = "" + action = "update" + expected = map[string]interface{}{ + "node_id": nodeId, + "action": action, + } + result = getNodeValue(nodeId, action) + assert.Equal(t, expected, result, "The returned map should match the expected map") +} + +func TestGetSpectroComponentsUpgrade(t *testing.T) { + tests := []struct { + name string + cluster *models.V1SpectroCluster + expected string + }{ + { + name: "Annotation is 'true'", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{ + 
Annotations: map[string]string{ + "spectroComponentsUpgradeForbidden": "true", + }, + }, + }, + expected: "lock", + }, + { + name: "Annotation is 'false'", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{ + Annotations: map[string]string{ + "spectroComponentsUpgradeForbidden": "false", + }, + }, + }, + expected: "unlock", + }, + { + name: "Annotation is not present", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + expected: "unlock", + }, + { + name: "Annotations are nil", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + }, + }, + expected: "unlock", + }, + { + name: "Different annotation key", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{ + Annotations: map[string]string{ + "otherKey": "someValue", + }, + }, + }, + expected: "unlock", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getSpectroComponentsUpgrade(tt.cluster) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestGetCommonCluster(t *testing.T) { + tests := []struct { + name string + resourceData *schema.ResourceData + expectedError string + expectedID string + expectedName string + expectedCtx string + }{ + { + name: "Successful cluster retrieval", + + resourceData: func() *schema.ResourceData { + d := resourceClusterGcp().TestResourceData() + d.SetId("cluster-id:project") + return d + }(), + expectedError: "", + expectedID: "cluster-id", + expectedName: "cluster-name", + expectedCtx: "resource-context", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + _, err := GetCommonCluster(tt.resourceData, unitTestMockAPIClient) + assert.NoError(t, err) + + }) + } +} + +func TestValidateCloudType(t *testing.T) { + tests := []struct { + name string + resourceName string + cluster *models.V1SpectroCluster + expectedError string + }{ + { + name: "Successful validation", + 
resourceName: "spectrocloud_cluster_aws", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{UID: "cluster-uid-123"}, + Spec: &models.V1SpectroClusterSpec{CloudType: "aws"}, + }, + expectedError: "", + }, + { + name: "Cluster spec is nil", + resourceName: "spectrocloud_cluster_aws", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{UID: "cluster-uid-123"}, + Spec: nil, + }, + expectedError: "cluster spec is nil in cluster cluster-uid-123", + }, + { + name: "Cloud type mismatch", + resourceName: "spectrocloud_cluster_aws", + cluster: &models.V1SpectroCluster{ + Metadata: &models.V1ObjectMeta{UID: "cluster-uid-123"}, + Spec: &models.V1SpectroClusterSpec{CloudType: "gcp"}, + }, + expectedError: "resource with id cluster-uid-123 is not of type spectrocloud_cluster_aws, need to correct resource type", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateCloudType(tt.resourceName, tt.cluster) + + if tt.expectedError != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedError) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestUpdateAgentUpgradeSetting(t *testing.T) { + resourceData := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "pause_agent_upgrades": { + Type: schema.TypeString, + Optional: true, + }, + }, map[string]interface{}{}) + + tests := []struct { + name string + inputPauseUpgrades string + mockError error + expectError bool + }{ + { + name: "Pause agent upgrades is set", + inputPauseUpgrades: "true", + mockError: nil, + expectError: false, + }, + { + name: "Pause agent upgrades is not set", + inputPauseUpgrades: "", + mockError: nil, + expectError: false, + }, + { + name: "Client returns an error", + inputPauseUpgrades: "true", + mockError: assert.AnError, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resourceData.Set("pause_agent_upgrades", tt.inputPauseUpgrades) + 
resourceData.SetId("test-cluster-id") + + err := updateAgentUpgradeSetting(getV1ClientWithResourceContext(unitTestMockAPIClient, "project"), resourceData) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + }) + } +} + +func TestValidateCloudTypeOne(t *testing.T) { + tests := []struct { + name string + input interface{} + expectedDiags diag.Diagnostics + expectedErrors bool + }{ + { + name: "Valid cloud type: aws", + input: "aws", + expectedDiags: diag.Diagnostics{}, + expectedErrors: false, + }, + { + name: "Valid cloud type: azure", + input: "azure", + expectedDiags: diag.Diagnostics{}, + expectedErrors: false, + }, + { + name: "Invalid cloud type", + input: "invalid-cloud", + expectedDiags: diag.Diagnostics{diag.Diagnostic{Severity: diag.Error, Summary: fmt.Sprintf("cloud type '%s' is invalid. valid cloud types are %v", "invalid-cloud", "cloud_types")}}, + expectedErrors: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diags := validateCloudType(tt.input, cty.Path{}) + + if tt.expectedErrors { + assert.Len(t, diags, 1) + assert.Equal(t, tt.expectedDiags[0].Summary, diags[0].Summary) + } else { + assert.Empty(t, diags) + } + }) + } +} + +func TestFlattenCloudConfigGeneric(t *testing.T) { + resourceData := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "cloud_config_id": { + Type: schema.TypeString, + Optional: true, + }, + }, map[string]interface{}{}) + + client := &client.V1Client{} + configUID := "test-config-uid" + + diags := flattenCloudConfigGeneric(configUID, resourceData, client) + + assert.Empty(t, diags) + assert.Equal(t, configUID, resourceData.Get("cloud_config_id")) +} diff --git a/spectrocloud/cluster_node_common_test.go b/spectrocloud/cluster_node_common_test.go index a99597ec..6be0124c 100644 --- a/spectrocloud/cluster_node_common_test.go +++ b/spectrocloud/cluster_node_common_test.go @@ -45,3 +45,58 @@ func TestGetMachinePoolList(t *testing.T) { }) } } + +// 
Test for getNodeValue function +func TestGetNodeValue1(t *testing.T) { + nodeId := "node-123" + action := "action-xyz" + + expected := map[string]interface{}{ + "node_id": nodeId, + "action": action, + } + + result := getNodeValue(nodeId, action) + assert.Equal(t, expected, result) +} + +// Test for getMachinePoolList function +func TestGetMachinePoolList1(t *testing.T) { + tests := []struct { + name string + input interface{} + expected []interface{} + isError bool + }{ + { + name: "With []interface{}", + input: []interface{}{ + map[string]interface{}{"name": "pool1"}, + map[string]interface{}{"name": "pool2"}, + }, + expected: []interface{}{ + map[string]interface{}{"name": "pool1"}, + map[string]interface{}{"name": "pool2"}, + }, + isError: false, + }, + { + name: "With invalid type", + input: "invalid", + expected: nil, + isError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, _, err := getMachinePoolList(tt.input) + if tt.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expected, result) + } + }) + } +} diff --git a/spectrocloud/common_addon_depl_test.go b/spectrocloud/common_addon_depl_test.go index 6a29fc54..427d8eaf 100644 --- a/spectrocloud/common_addon_depl_test.go +++ b/spectrocloud/common_addon_depl_test.go @@ -1,34 +1 @@ package spectrocloud - -//func TestToAddonDeployment(t *testing.T) { -// assert := assert.New(t) -// -// // Create a mock ResourceData object -// d := prepareAddonDeploymentTestData("depl-test-id") -// -// m := &client.V1Client{} -// -// addonDeployment, err := toAddonDeployment(m, d) -// assert.Nil(err) -// -// // Verifying apply setting -// assert.Equal(d.Get("apply_setting"), addonDeployment.SpcApplySettings.ActionType) -// -// // Verifying cluster profile -// profiles := d.Get("cluster_profile").([]interface{}) -// for i, profile := range profiles { -// p := profile.(map[string]interface{}) -// assert.Equal(p["id"].(string), 
addonDeployment.Profiles[i].UID) -// -// // Verifying pack values -// packValues := p["pack"].([]interface{}) -// for j, pack := range packValues { -// packMap := pack.(map[string]interface{}) -// assert.Equal(packMap["name"], *addonDeployment.Profiles[i].PackValues[j].Name) -// assert.Equal(packMap["tag"], addonDeployment.Profiles[i].PackValues[j].Tag) -// assert.Equal(packMap["type"], string(addonDeployment.Profiles[i].PackValues[j].Type)) -// assert.Equal(packMap["values"], addonDeployment.Profiles[i].PackValues[j].Values) -// } -// -// } -//} diff --git a/spectrocloud/common_test.go b/spectrocloud/common_test.go index a1ed154e..abe0b9ae 100644 --- a/spectrocloud/common_test.go +++ b/spectrocloud/common_test.go @@ -1,61 +1,196 @@ package spectrocloud import ( + "context" + "crypto/tls" + "errors" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/spectrocloud/palette-sdk-go/client" + "github.com/stretchr/testify/assert" + "net/http" "os" + "os/exec" + "path/filepath" "testing" + "time" ) -type Cred struct { - hubbleHost string - project string - apikey string - component string - AlertUid string -} +//type Cred struct { +// hubbleHost string +// project string +// apikey string +// component string +// AlertUid string +//} + +const ( + negativeHost = "127.0.0.1:8888" + host = "127.0.0.1:8080" + trace = false + retryAttempts = 10 + apiKey = "12345" + projectName = "unittest" + projectUID = "testprojectuid" +) -var baseConfig Cred +// var baseConfig Cred +var unitTestMockAPIClient interface{} +var unitTestMockAPINegativeClient interface{} + +var basePath = "" +var startMockApiServerScript = "" +var stopMockApiServerScript = "" func TestMain(m *testing.M) { - setup() + cwd, _ := os.Getwd() + _ = os.Setenv("TF_SRC", filepath.Dir(cwd)) + basePath = os.Getenv("TF_SRC") + startMockApiServerScript = basePath + "/tests/mockApiServer/start_mock_api_server.sh" + stopMockApiServerScript = basePath + "/tests/mockApiServer/stop_mock_api_server.sh" + 
fmt.Printf("\033[1;36m%s\033[0m", "> [Debug] Basepath -"+basePath+" \n") + err := setup() + if err != nil { + fmt.Printf("Error during setup: %v\n", err) + os.Exit(1) + } code := m.Run() teardown() os.Exit(code) } -func setup() { - // Setting up test credentials & base config from env variables - baseConfig.hubbleHost = getEnvWithFallBack("TEST_HOST") - baseConfig.project = getEnvWithFallBack("TEST_PROJECT") - baseConfig.apikey = getEnvWithFallBack("TEST_API_KEY") - baseConfig.component = "ClusterHealth" - baseConfig.AlertUid = "" - if IsIntegrationTestEnvSet(baseConfig) { - fmt.Printf("\033[1;36m%s\033[0m", "> Credentials & Base config setup completed\n") - fmt.Printf("\033[1;36m%s\033[0m", "-- Test Runnig with below crdentials & base config\n") - fmt.Printf("* Test host - %s \n", baseConfig.hubbleHost) - fmt.Printf("* Test project - %s \n", baseConfig.project) - fmt.Printf("* Test key - %s \n", "***********************") - fmt.Printf("\033[1;36m%s\033[0m", "-------------------------------\n") - } else { - fmt.Printf("\033[1;36m%s\033[0m", "> Since env variable not sipping integration test\n") - } - fmt.Printf("\033[1;36m%s\033[0m", "> Setup completed \n") +func unitTestProviderConfigure(ctx context.Context) (interface{}, diag.Diagnostics) { + host := host + apiKey := apiKey + retryAttempts := retryAttempts + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + c := client.New( + client.WithPaletteURI(host), + client.WithAPIKey(apiKey), + client.WithRetries(retryAttempts), + client.WithInsecureSkipVerify(true), + client.WithRetries(1)) + + //// comment to trace flag + //client.WithTransportDebug()(c) + + uid := projectUID + ProviderInitProjectUid = uid + client.WithScopeProject(uid)(c) + return c, diags } -func IsIntegrationTestEnvSet(config Cred) (envSet bool) { - if config.hubbleHost != "" && config.project != "" && config.apikey != "" { - return true - } else { - return false + +func unitTestNegativeCaseProviderConfigure(ctx 
context.Context) (interface{}, diag.Diagnostics) { + apiKey := apiKey + retryAttempts := retryAttempts + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + c := client.New( + client.WithPaletteURI(negativeHost), + client.WithAPIKey(apiKey), + client.WithRetries(retryAttempts), + client.WithInsecureSkipVerify(true), + client.WithRetries(1)) + + //// comment to trace flag + //client.WithTransportDebug()(c) + + uid := projectUID + ProviderInitProjectUid = uid + client.WithScopeProject(uid)(c) + return c, diags +} + +func checkMockServerHealth() error { + maxRetries := 5 + delay := 2 * time.Second + + // Skip TLS verification (use with caution; not recommended for production) + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } + c := &http.Client{Transport: tr} + + for i := 0; i < maxRetries; i++ { + // Create a new HTTP request + req, err := http.NewRequest("GET", "https://127.0.0.1:8080/v1/health", nil) + if err != nil { + return err + } + + // Add the API key as a header + req.Header.Set("ApiKey", "12345") + + // Send the request + resp, err := c.Do(req) + if err == nil && resp.StatusCode == http.StatusOK { + // Server is up and running + err := resp.Body.Close() + if err != nil { + return err + } + return nil + } + + if resp != nil { + err := resp.Body.Close() + if err != nil { + return err + } + } + + // Wait before retrying + time.Sleep(delay) + } + + return errors.New("server is not responding after multiple attempts") } -func getEnvWithFallBack(key string) (response string) { - value := os.Getenv(key) - if len(value) == 0 { - return "" + +func setup() error { + fmt.Printf("\033[1;36m%s\033[0m", "> Starting Mock API Server \n") + var ctx context.Context + + cmd := exec.Command("sh", startMockApiServerScript) + output, err := cmd.CombinedOutput() + err = checkMockServerHealth() + if err != nil { + fmt.Printf("Failed to run start api server script: %s\nError: %s", output, err) + return err } - 
return value + + fmt.Printf("\033[1;36m%s\033[0m", "> Started Mock Api Server at https://127.0.0.1:8080 & https://127.0.0.1:8888 \n") + unitTestMockAPIClient, _ = unitTestProviderConfigure(ctx) + unitTestMockAPINegativeClient, _ = unitTestNegativeCaseProviderConfigure(ctx) + fmt.Printf("\033[1;36m%s\033[0m", "> Setup completed \n") + return nil } + func teardown() { + cmd := exec.Command("bash", stopMockApiServerScript) + _, _ = cmd.CombinedOutput() + fmt.Printf("\033[1;36m%s\033[0m", "> Stopped Mock Api Server \n") fmt.Printf("\033[1;36m%s\033[0m", "> Teardown completed \n") + err := deleteBuild() + if err != nil { + fmt.Printf("Test Clean up is incomplete: %v\n", err) + } +} + +func deleteBuild() error { + err := os.Remove(basePath + "/tests/mockApiServer/MockBuild") + if err != nil { + return err + } + return nil +} + +func assertFirstDiagMessage(t *testing.T, diags diag.Diagnostics, msg string) { + if assert.NotEmpty(t, diags, "Expected diags to contain at least one element") { + assert.Contains(t, diags[0].Summary, msg, "The first diagnostic message does not contain the expected error message") + } } diff --git a/spectrocloud/convert/volume_kubevirt_to_hapi_common_test.go b/spectrocloud/convert/volume_kubevirt_to_hapi_common_test.go new file mode 100644 index 00000000..fc37f5eb --- /dev/null +++ b/spectrocloud/convert/volume_kubevirt_to_hapi_common_test.go @@ -0,0 +1,351 @@ +package convert + +import ( + "encoding/json" + "errors" + "github.com/go-openapi/strfmt" + "github.com/spectrocloud/palette-sdk-go/api/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/stretchr/testify/assert" + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubevirtapiv1 "kubevirt.io/api/core/v1" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "testing" +) + +func TestToHapiVolume(t *testing.T) { + // Step 1: Prepare sample DataVolume object + volume := &cdiv1.DataVolume{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-volume", + Namespace: "default", + Annotations: map[string]string{"test-annotation-key": "test-annotation-value"}, + DeletionGracePeriodSeconds: int64Ptr(30), + Finalizers: []string{"test-finalizer"}, + GenerateName: "test-", + Generation: 1, + Labels: map[string]string{"test-label-key": "test-label-value"}, + ManagedFields: []metav1.ManagedFieldsEntry{}, + OwnerReferences: []metav1.OwnerReference{}, + ResourceVersion: "123456", + UID: "test-uid", + }, + Spec: cdiv1.DataVolumeSpec{ + // Populate DataVolumeSpec fields + }, + } + + // Step 2: Prepare AddVolumeOptions + addVolumeOptions := &models.V1VMAddVolumeOptions{ + // Populate AddVolumeOptions fields + } + + // Step 3: Call the function + hapiVolume, err := ToHapiVolume(volume, addVolumeOptions) + + // Step 4: Validate the result + assert.NoError(t, err) + assert.NotNil(t, hapiVolume) + assert.Equal(t, "test-volume", hapiVolume.DataVolumeTemplate.Metadata.Name) + assert.Equal(t, "default", hapiVolume.DataVolumeTemplate.Metadata.Namespace) + assert.Equal(t, "test-uid", hapiVolume.DataVolumeTemplate.Metadata.UID) + assert.Equal(t, int64(30), hapiVolume.DataVolumeTemplate.Metadata.DeletionGracePeriodSeconds) + // Add more assertions as necessary to validate other fields +} + +func int64Ptr(i int64) *int64 { + return &i +} + +func TestFromHapiVolume(t *testing.T) { + // Step 1: Prepare sample V1VMAddVolumeEntity object + hapiVolume := &models.V1VMAddVolumeEntity{ + DataVolumeTemplate: &models.V1VMDataVolumeTemplateSpec{ + Metadata: &models.V1VMObjectMeta{ + Annotations: map[string]string{"test-annotation-key": "test-annotation-value"}, + DeletionGracePeriodSeconds: 30, + Finalizers: []string{"test-finalizer"}, + GenerateName: "test-", + Generation: 1, + Labels: map[string]string{"test-label-key": "test-label-value"}, + Name: "test-volume", + Namespace: "default", + ResourceVersion: "123456", + UID: "test-uid", + }, + Spec: &models.V1VMDataVolumeSpec{}, + }, + } + + // Step 2: Call 
the function + volume, err := FromHapiVolume(hapiVolume) + + // Step 3: Validate the result + assert.NoError(t, err) + assert.NotNil(t, volume) + assert.Equal(t, "test-volume", volume.Name) + assert.Equal(t, "default", volume.Namespace) + assert.Equal(t, "test-uid", string(volume.UID)) + assert.Equal(t, "test-annotation-value", volume.Annotations["test-annotation-key"]) + assert.Equal(t, int64(30), *volume.DeletionGracePeriodSeconds) + + // Validate the Spec + specJson, err := json.Marshal(hapiVolume.DataVolumeTemplate.Spec) + assert.NoError(t, err) + + var expectedSpec cdiv1.DataVolumeSpec + err = json.Unmarshal(specJson, &expectedSpec) + assert.NoError(t, err) + assert.Equal(t, expectedSpec, volume.Spec) +} + +func TestToHapiVmStatusM(t *testing.T) { + // Step 1: Prepare sample VirtualMachineStatus object + status := kubevirtapiv1.VirtualMachineStatus{ + + PrintableStatus: "Running", + } + + // Step 2: Call the function + hapiVmStatus, err := ToHapiVmStatusM(status) + + // Step 3: Validate the result + assert.NoError(t, err) + assert.NotNil(t, hapiVmStatus) + + // Validate the fields that are mapped + assert.Equal(t, "Running", hapiVmStatus.PrintableStatus) + +} + +func TestToHapiVmSpecM(t *testing.T) { + spec := kubevirtapiv1.VirtualMachineSpec{ + Running: func(b bool) *bool { return &b }(true), + } + + hapiVmSpec, err := ToHapiVmSpecM(spec) + assert.NoError(t, err) + assert.NotNil(t, hapiVmSpec) +} + +func TestToHapiVm(t *testing.T) { + vm := &kubevirtapiv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "default", + Annotations: map[string]string{"key": "value"}, + DeletionGracePeriodSeconds: func(i int64) *int64 { return &i }(30), + Finalizers: []string{"finalizer"}, + GenerateName: "test-vm-", + Generation: 1, + Labels: map[string]string{"label": "value"}, + ManagedFields: []metav1.ManagedFieldsEntry{ + { + APIVersion: "v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte("raw-data")}, + Manager: "manager", 
+ Operation: "Update", + }, + }, + ResourceVersion: "12345", + UID: "uid12345", + }, + Spec: kubevirtapiv1.VirtualMachineSpec{ + Running: func(b bool) *bool { return &b }(true), + }, + Status: kubevirtapiv1.VirtualMachineStatus{ + Ready: false, + PrintableStatus: "Running", + }, + } + + hapiVM, err := ToHapiVm(vm) + assert.NoError(t, err) + assert.NotNil(t, hapiVM) + + _, err = json.Marshal(vm.Spec) + assert.NoError(t, err) + + assert.Equal(t, vm.ObjectMeta.Name, hapiVM.Metadata.Name) + assert.Equal(t, vm.ObjectMeta.Namespace, hapiVM.Metadata.Namespace) + assert.Equal(t, vm.ObjectMeta.Annotations, hapiVM.Metadata.Annotations) + assert.Equal(t, *vm.DeletionGracePeriodSeconds, hapiVM.Metadata.DeletionGracePeriodSeconds) + assert.Equal(t, vm.ObjectMeta.Finalizers, hapiVM.Metadata.Finalizers) + assert.Equal(t, vm.ObjectMeta.GenerateName, hapiVM.Metadata.GenerateName) + assert.Equal(t, vm.ObjectMeta.Generation, hapiVM.Metadata.Generation) + assert.Equal(t, vm.ObjectMeta.Labels, hapiVM.Metadata.Labels) + assert.Equal(t, vm.ObjectMeta.ResourceVersion, hapiVM.Metadata.ResourceVersion) + assert.Equal(t, string(vm.ObjectMeta.UID), hapiVM.Metadata.UID) +} + +func TestToKubevirtVMStatusM(t *testing.T) { + status := &models.V1ClusterVirtualMachineStatus{ + Created: true, + Ready: true, + SnapshotInProgress: "snapshot-in-progress", + RestoreInProgress: "restore-in-progress", + PrintableStatus: "Running", + } + + kubevirtStatus, err := ToKubevirtVMStatusM(status) + assert.NoError(t, err) + assert.NotNil(t, kubevirtStatus) + +} + +func TestToKubevirtVMStatus(t *testing.T) { + status := &models.V1ClusterVirtualMachineStatus{ + Created: true, + Ready: true, + SnapshotInProgress: "snapshot-in-progress", + RestoreInProgress: "restore-in-progress", + PrintableStatus: "Running", + Conditions: []*models.V1VMVirtualMachineCondition{ + { + Type: types.Ptr("Ready"), + Status: types.Ptr("True"), + Reason: "Ready", + Message: "VM is ready", + }, + }, + } + + kubevirtStatus := 
ToKubevirtVMStatus(status) + assert.NotNil(t, kubevirtStatus) + + assert.Equal(t, status.Created, kubevirtStatus.Created) + assert.Equal(t, status.Ready, kubevirtStatus.Ready) + assert.Equal(t, status.SnapshotInProgress, *kubevirtStatus.SnapshotInProgress) + assert.Equal(t, status.RestoreInProgress, *kubevirtStatus.RestoreInProgress) + assert.Equal(t, kubevirtapiv1.VirtualMachinePrintableStatus(status.PrintableStatus), kubevirtStatus.PrintableStatus) + + assert.Len(t, kubevirtStatus.Conditions, len(status.Conditions)) + for i, condition := range kubevirtStatus.Conditions { + assert.Equal(t, kubevirtapiv1.VirtualMachineConditionType(*status.Conditions[i].Type), condition.Type) + assert.Equal(t, k8sv1.ConditionStatus(*status.Conditions[i].Status), condition.Status) + assert.Equal(t, status.Conditions[i].Reason, condition.Reason) + assert.Equal(t, status.Conditions[i].Message, condition.Message) + } +} + +func TestToKubevirtVMSpecM(t *testing.T) { + // Create a test input for V1ClusterVirtualMachineSpec + testSpec := &models.V1ClusterVirtualMachineSpec{ + DataVolumeTemplates: []*models.V1VMDataVolumeTemplateSpec{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1VMObjectMeta{ + Name: "test-volume", + UID: "test-uid-volume", + }, + Spec: nil, + }, + }, + Instancetype: &models.V1VMInstancetypeMatcher{ + InferFromVolume: "", + Kind: "", + Name: "test-instance-type", + RevisionName: "testins", + }, + Preference: &models.V1VMPreferenceMatcher{ + InferFromVolume: "test-vol", + Kind: "node", + Name: "test-pref", + RevisionName: "testpref", + }, + RunStrategy: "new", + Running: true, + Template: &models.V1VMVirtualMachineInstanceTemplateSpec{ + Metadata: &models.V1VMObjectMeta{ + Name: "test-tem", + UID: "test-uid-template", + }, + Spec: &models.V1VMVirtualMachineInstanceSpec{ + AccessCredentials: nil, + Affinity: nil, + DNSConfig: nil, + DNSPolicy: "test-1", + Domain: nil, + EvictionStrategy: "ready", + Hostname: "127.0.0.1", + LivenessProbe: nil, + Networks: nil, + 
NodeSelector: nil, + PriorityClassName: "level", + ReadinessProbe: nil, + SchedulerName: "auto", + StartStrategy: "run", + Subdomain: "test.test.com", + TerminationGracePeriodSeconds: 10, + Tolerations: nil, + TopologySpreadConstraints: nil, + Volumes: nil, + }, + }, + } + + // Call the function under test + _, err := ToKubevirtVMSpecM(testSpec) + + // Assert no error occurred + assert.NoError(t, err) + +} + +func TestToKubevirtVM(t *testing.T) { + hapiVM := &models.V1ClusterVirtualMachine{ + Kind: "VirtualMachine", + APIVersion: "kubevirt.io/v1", + Metadata: &models.V1VMObjectMeta{ + Name: "test-vm", + Namespace: "test-namespace", + UID: "123456", + OwnerReferences: []*models.V1VMOwnerReference{ + { + APIVersion: types.Ptr("v1"), + BlockOwnerDeletion: true, + Controller: true, + Kind: types.Ptr("ReplicaSet"), + Name: types.Ptr("test-owner"), + UID: types.Ptr("654321"), + }, + }, + ManagedFields: []*models.V1VMManagedFieldsEntry{ + { + APIVersion: "v1", + FieldsType: "FieldsV1", + FieldsV1: &models.V1VMFieldsV1{ + Raw: []strfmt.Base64{strfmt.Base64("c29tZS1maWVsZHM=")}, + }, + Manager: "kubectl", + Operation: "Apply", + }, + }, + }, + Spec: &models.V1ClusterVirtualMachineSpec{ + Running: true, + }, + Status: &models.V1ClusterVirtualMachineStatus{ + Created: true, + }, + } + + kubevirtVM, err := ToKubevirtVM(hapiVM) + assert.NoError(t, err) + assert.NotNil(t, kubevirtVM) + assert.Equal(t, "test-vm", kubevirtVM.Name) + assert.Equal(t, "test-namespace", kubevirtVM.Namespace) + assert.Equal(t, 1, len(kubevirtVM.OwnerReferences)) + assert.Equal(t, "654321", string(kubevirtVM.OwnerReferences[0].UID)) + assert.Equal(t, "Apply", string(kubevirtVM.ManagedFields[0].Operation)) +} + +func TestToKubevirtVM_NilInput(t *testing.T) { + kubevirtVM, err := ToKubevirtVM(nil) + assert.Error(t, err) + assert.Nil(t, kubevirtVM) + assert.Equal(t, errors.New("hapiVM is nil"), err) +} diff --git a/spectrocloud/data_source_appliance_test.go b/spectrocloud/data_source_appliance_test.go new 
file mode 100644 index 00000000..56553b5a --- /dev/null +++ b/spectrocloud/data_source_appliance_test.go @@ -0,0 +1,55 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseDataSourceApplianceSchema() *schema.ResourceData { + d := dataSourceAppliance().TestResourceData() + d.SetId("test123") + err := d.Set("name", "test-edge-01") + if err != nil { + return nil + } + err = d.Set("tags", map[string]string{"test": "true"}) + if err != nil { + return nil + } + err = d.Set("status", "ready") + if err != nil { + return nil + } + err = d.Set("health", "healthy") + if err != nil { + return nil + } + err = d.Set("architecture", "amd") + if err != nil { + return nil + } + return d +} + +func TestDataSourceApplianceReadFunc(t *testing.T) { + d := prepareBaseDataSourceApplianceSchema() + var diags diag.Diagnostics + + var ctx context.Context + diags = dataSourceApplianceRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestDataSourceApplianceReadNegativeFunc(t *testing.T) { + d := prepareBaseDataSourceApplianceSchema() + var diags diag.Diagnostics + + var ctx context.Context + diags = dataSourceApplianceRead(ctx, d, unitTestMockAPINegativeClient) + if assert.NotEmpty(t, diags, "Expected diags to contain at least one element") { + assert.Contains(t, diags[0].Summary, "No edge host found", "The first diagnostic message does not contain the expected error message") + } +} diff --git a/spectrocloud/data_source_appliances_test.go b/spectrocloud/data_source_appliances_test.go new file mode 100644 index 00000000..74dfb52b --- /dev/null +++ b/spectrocloud/data_source_appliances_test.go @@ -0,0 +1,53 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseDataSourceAppliancesSchema() *schema.ResourceData { + d := dataSourceAppliances().TestResourceData() + d.SetId("test123") + err := d.Set("context", "project") + if err != nil { + return nil + } + err = d.Set("tags", map[string]string{"test": "true"}) + if err != nil { + return nil + } + err = d.Set("status", "ready") + if err != nil { + return nil + } + err = d.Set("health", "healthy") + if err != nil { + return nil + } + err = d.Set("architecture", "amd") + if err != nil { + return nil + } + return d +} + +func TestDataSourceAppliancesReadFunc(t *testing.T) { + d := prepareBaseDataSourceAppliancesSchema() + var diags diag.Diagnostics + + var ctx context.Context + diags = dataSourcesApplianceRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestDataSourceAppliancesReadNegativeFunc(t *testing.T) { + d := prepareBaseDataSourceAppliancesSchema() + var diags diag.Diagnostics + + var ctx context.Context + diags = dataSourcesApplianceRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "No edge host found") +} diff --git a/spectrocloud/data_source_application_profile_test.go b/spectrocloud/data_source_application_profile_test.go new file mode 100644 index 00000000..6cfecde0 --- /dev/null +++ b/spectrocloud/data_source_application_profile_test.go @@ -0,0 +1,17 @@ +package spectrocloud + +import ( + "context" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceApplicationProfileRead(t *testing.T) { + resourceData := dataSourceApplicationProfile().TestResourceData() + _ = resourceData.Set("name", "test-application-profile") + + diags := dataSourceApplicationProfileRead(context.Background(), resourceData, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-application-profile", resourceData.Get("name").(string)) + +} diff --git a/spectrocloud/data_source_backup_storage_location.go 
b/spectrocloud/data_source_backup_storage_location.go index d47f79ab..6e90a0d6 100644 --- a/spectrocloud/data_source_backup_storage_location.go +++ b/spectrocloud/data_source_backup_storage_location.go @@ -2,10 +2,8 @@ package spectrocloud import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/spectrocloud/palette-sdk-go/api/models" ) diff --git a/spectrocloud/data_source_backup_storage_location_test.go b/spectrocloud/data_source_backup_storage_location_test.go new file mode 100644 index 00000000..d7ff724e --- /dev/null +++ b/spectrocloud/data_source_backup_storage_location_test.go @@ -0,0 +1,71 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/spectrocloud/palette-sdk-go/api/models" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceBackupStorageLocationRead(t *testing.T) { + tests := []struct { + name string + inputID string + inputName string + bsls []*models.V1UserAssetsLocation + expectedDiag diag.Diagnostics + expectedID string + expectedName string + expectingError bool + }{ + { + name: "Backup storage location not found", + inputID: "non-existent-uid", + bsls: []*models.V1UserAssetsLocation{{Metadata: &models.V1ObjectMeta{UID: "test-bsl-location-uid", Name: "test-bsl-location"}}}, + expectedDiag: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "Unable to find backup storage location", + Detail: "Unable to find the specified backup storage location", + }, + }, + expectingError: true, + }, + { + name: "Error setting name in state", + inputID: "test-bsl-location-uid", + bsls: []*models.V1UserAssetsLocation{{Metadata: &models.V1ObjectMeta{UID: "test-bsl-location-uid", Name: "test-bsl-location"}}}, + expectedDiag: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "Unable to find backup storage location", + Detail: "Unable to find the specified backup storage 
location", + }, + }, + expectingError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resourceData := dataSourceBackupStorageLocation().TestResourceData() + + resourceData.SetId(tt.inputID) + if tt.inputName != "" { + resourceData.Set("name", tt.inputName) + } + + diags := dataSourceBackupStorageLocationRead(context.Background(), resourceData, unitTestMockAPIClient) + + if tt.expectingError { + assert.Equal(t, tt.expectedDiag, diags) + } else { + assert.Equal(t, "", diags) + assert.Equal(t, tt.expectedID, resourceData.Id()) + name, _ := resourceData.Get("name").(string) + assert.Equal(t, tt.expectedName, name) + } + }) + } +} diff --git a/spectrocloud/data_source_cloud_account_custom.go b/spectrocloud/data_source_cloud_account_custom.go index 4567a33e..29cb4998 100644 --- a/spectrocloud/data_source_cloud_account_custom.go +++ b/spectrocloud/data_source_cloud_account_custom.go @@ -57,7 +57,7 @@ func dataSourceCloudAccountCustomRead(_ context.Context, d *schema.ResourceData, if account == nil { diags = append(diags, diag.Diagnostic{ Severity: diag.Error, - Summary: "Unable to find aws cloud account", + Summary: "Unable to find cloud account", Detail: "Unable to find the specified aws cloud account", }) return diags diff --git a/spectrocloud/data_source_cloud_account_test.go b/spectrocloud/data_source_cloud_account_test.go new file mode 100644 index 00000000..7b94aa98 --- /dev/null +++ b/spectrocloud/data_source_cloud_account_test.go @@ -0,0 +1,269 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseDataSourceTKEAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountTencent().TestResourceData() + return d +} + +func TestReadTKEAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceTKEAccountSchema() + var diags diag.Diagnostics 
+ + var ctx context.Context + _ = d.Set("name", "test-tke-account-1") + diags = dataSourceCloudAccountTencentRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadTKEAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceTKEAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-tke-account-id-1") + diags = dataSourceCloudAccountTencentRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadTKEAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceTKEAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-tke-account-1") + diags = dataSourceCloudAccountTencentRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find tencent cloud account") +} + +func prepareBaseDataSourceAWSAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountAws().TestResourceData() + return d +} +func TestReadAWSAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceAWSAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-aws-account-1") + diags = dataSourceCloudAccountAwsRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadAWSAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceAWSAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-aws-account-id-1") + diags = dataSourceCloudAccountAwsRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadAWSAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceAWSAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-aws-account-1") + diags = dataSourceCloudAccountAwsRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find aws cloud account") +} + +func prepareBaseDataSourceAzureAccountSchema() 
*schema.ResourceData { + d := dataSourceCloudAccountAzure().TestResourceData() + return d +} +func TestReadAzureAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceAzureAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-azure-account-1") + diags = dataSourceCloudAccountAzureRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadAzureAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceAzureAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-azure-account-id-1") + diags = dataSourceCloudAccountAzureRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadAzureAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceAzureAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-azure-account-1") + diags = dataSourceCloudAccountAzureRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find azure cloud account") +} + +func prepareBaseDataSourceGcpAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountGcp().TestResourceData() + return d +} +func TestReadGcpAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceGcpAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-gcp-account-1") + diags = dataSourceCloudAccountGcpRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadGcpAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceGcpAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-gcp-account-id-1") + diags = dataSourceCloudAccountGcpRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadGcpAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceGcpAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", 
"test-gcp-account-1") + diags = dataSourceCloudAccountGcpRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find gcp cloud account") +} + +func prepareBaseDataSourceVsphereAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountVsphere().TestResourceData() + return d +} +func TestReadVsphereAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceVsphereAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-vsphere-account-1") + diags = dataSourceCloudAccountVsphereRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadVsphereAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceVsphereAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-vsphere-account-id-1") + diags = dataSourceCloudAccountVsphereRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadVsphereAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceVsphereAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-vsphere-account-1") + diags = dataSourceCloudAccountVsphereRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find vsphere cloud account") +} + +func prepareBaseDataSourceOpenstackAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountOpenStack().TestResourceData() + return d +} +func TestReadOpenstackAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceOpenstackAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-openstack-account-1") + diags = dataSourceCloudAccountOpenStackRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadOpenstackAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceOpenstackAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", 
"test-openstack-account-id-1") + diags = dataSourceCloudAccountOpenStackRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadOpenstackAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceOpenstackAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-openstack-account-1") + diags = dataSourceCloudAccountOpenStackRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find openstack cloud account") +} + +func prepareBaseDataSourceMaasAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountMaas().TestResourceData() + return d +} +func TestReadMaasAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceMaasAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-maas-account-1") + diags = dataSourceCloudAccountMaasRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadMaasAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceMaasAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-maas-account-id-1") + diags = dataSourceCloudAccountMaasRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadMaasAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceMaasAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-maas-account-1") + diags = dataSourceCloudAccountMaasRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find maas cloud account") +} + +func prepareBaseDataSourceCustomAccountSchema() *schema.ResourceData { + d := dataSourceCloudAccountCustom().TestResourceData() + return d +} +func TestReadCustomAccountFuncName(t *testing.T) { + d := prepareBaseDataSourceCustomAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-custom-account-1") + _ = 
d.Set("cloud", "nutanix") + diags = dataSourceCloudAccountCustomRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadCustomAccountFuncID(t *testing.T) { + d := prepareBaseDataSourceCustomAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("id", "test-custom-account-id-1") + _ = d.Set("cloud", "nutanix") + diags = dataSourceCloudAccountCustomRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} +func TestReadCustomAccountFuncNegative(t *testing.T) { + d := prepareBaseDataSourceCustomAccountSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("name", "test-custom-account-1") + _ = d.Set("cloud", "nutanix") + diags = dataSourceCloudAccountCustomRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Unable to find cloud account") +} diff --git a/spectrocloud/data_source_cluster_group_test.go b/spectrocloud/data_source_cluster_group_test.go new file mode 100644 index 00000000..213c9573 --- /dev/null +++ b/spectrocloud/data_source_cluster_group_test.go @@ -0,0 +1,54 @@ +package spectrocloud + +import ( + "context" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceClusterGroupRead_SystemContext(t *testing.T) { + + resourceData := dataSourceClusterGroup().TestResourceData() + _ = resourceData.Set("name", "test-cluster-group") + _ = resourceData.Set("context", "system") + diags := dataSourceClusterGroupRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "test-cluster-group", resourceData.Get("name").(string)) + assert.NotEmpty(t, resourceData.Id()) +} + +func TestDataSourceClusterGroupRead_TenantContext(t *testing.T) { + resourceData := dataSourceClusterGroup().TestResourceData() + _ = resourceData.Set("name", "test-cluster-group") + _ = resourceData.Set("context", "tenant") + + diags := dataSourceClusterGroupRead(context.Background(), resourceData, 
unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "test-cluster-group", resourceData.Get("name").(string)) + assert.NotEmpty(t, resourceData.Id()) +} + +func TestDataSourceClusterGroupRead_ProjectContext(t *testing.T) { + resourceData := dataSourceClusterGroup().TestResourceData() + _ = resourceData.Set("name", "test-cluster-group") + _ = resourceData.Set("context", "project") + + diags := dataSourceClusterGroupRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "test-cluster-group", resourceData.Get("name").(string)) + assert.NotEmpty(t, resourceData.Id()) +} + +func TestDataSourceClusterGroupRead_InvalidContext(t *testing.T) { + resourceData := dataSourceClusterGroup().TestResourceData() + _ = resourceData.Set("name", "test-cluster-group") + _ = resourceData.Set("context", "other") + + diags := dataSourceClusterGroupRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "", resourceData.Id()) +} diff --git a/spectrocloud/data_source_cluster_profile_test.go b/spectrocloud/data_source_cluster_profile_test.go new file mode 100644 index 00000000..a42f3e2e --- /dev/null +++ b/spectrocloud/data_source_cluster_profile_test.go @@ -0,0 +1,83 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseDataSourceClusterProfileSchema() *schema.ResourceData { + d := dataSourceClusterProfile().TestResourceData() + return d +} + +func TestReadClusterProfileFuncName(t *testing.T) { + d := prepareBaseDataSourceClusterProfileSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("context", "project") + _ = d.Set("name", "test-cluster-profile-1") + diags = dataSourceClusterProfileRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} 
+func TestReadClusterProfileFuncId(t *testing.T) { + d := prepareBaseDataSourceClusterProfileSchema() + var diags diag.Diagnostics + var ctx context.Context + _ = d.Set("context", "project") + _ = d.Set("id", "test-uid") + _ = d.Set("name", "test-cluster-profile-1") + diags = dataSourceClusterProfileRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestReadClusterProfileFuncPacks(t *testing.T) { + d := prepareBaseDataSourceClusterProfileSchema() + var diags diag.Diagnostics + var ctx context.Context + _ = d.Set("context", "project") + _ = d.Set("id", "test-uid") + _ = d.Set("name", "test-cluster-profile-1") + + var packs []interface{} + packs = append(packs, map[string]interface{}{ + "name": "test-pack-1", + "type": "spectro", + "tag": "v1.0", + "uid": "test-uid", + "registry_uid": "test-registry-uid", + "values": "test-values", + "manifest": []interface{}{}, + }) + manifest := map[string]string{ + "name": "packmanifest", + "content": "manifest-content", + } + packs = append(packs, map[string]interface{}{ + "name": "test-pack-2", + "type": "spectro", + "tag": "v1.0", + "uid": "test-uid", + "registry_uid": "test-registry-uid", + "values": "test-values", + "manifest": []interface{}{manifest}, + }) + _ = d.Set("pack", packs) + diags = dataSourceClusterProfileRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestReadClusterProfileFuncNameNegative(t *testing.T) { + d := prepareBaseDataSourceClusterProfileSchema() + var diags diag.Diagnostics + + var ctx context.Context + _ = d.Set("context", "project") + _ = d.Set("name", "test-cluster-profile-1") + + diags = dataSourceClusterProfileRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "cluster profile not found") +} diff --git a/spectrocloud/data_source_cluster_test.go b/spectrocloud/data_source_cluster_test.go new file mode 100644 index 00000000..9c1a0707 --- /dev/null +++ b/spectrocloud/data_source_cluster_test.go @@ -0,0 +1,77 @@ 
+package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceClusterRead(t *testing.T) { + tests := []struct { + name string + resourceData *schema.ResourceData + expectedError bool + expectedDiags diag.Diagnostics + }{ + { + name: "Successful read", + resourceData: schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "name": {Type: schema.TypeString, Required: true}, + "context": {Type: schema.TypeString, Required: true}, + "virtual": {Type: schema.TypeBool, Optional: true}, + "kube_config": {Type: schema.TypeString, Computed: true}, + "admin_kube_config": {Type: schema.TypeString, Computed: true}, + }, map[string]interface{}{ + "name": "test-cluster", + "context": "some-context", + "virtual": false, + }), + expectedError: false, + expectedDiags: diag.Diagnostics{}, + }, + { + name: "Cluster not found", + resourceData: schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "name": {Type: schema.TypeString, Required: true}, + "context": {Type: schema.TypeString, Required: true}, + "virtual": {Type: schema.TypeBool, Optional: true}, + "kube_config": {Type: schema.TypeString, Computed: true}, + "admin_kube_config": {Type: schema.TypeString, Computed: true}, + }, map[string]interface{}{ + "name": "test-cluster", + "context": "some-context", + "virtual": false, + "kube_config": "", + "admin_kube_config": "", + }), + expectedError: false, + expectedDiags: diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Error, + Summary: "Cluster not found", + Detail: "The cluster 'test-cluster' was not found.", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + d := tt.resourceData + var ctx context.Context + diags := dataSourceClusterRead(ctx, d, unitTestMockAPIClient) + + if tt.expectedError { + assert.NotEmpty(t, diags) + } else { + assert.Empty(t, 
diags) + } + if d.Id() != "" { + assert.Equal(t, "test-cluster-id", d.Id()) + } + }) + } +} diff --git a/spectrocloud/data_source_filter_test.go b/spectrocloud/data_source_filter_test.go new file mode 100644 index 00000000..1181b138 --- /dev/null +++ b/spectrocloud/data_source_filter_test.go @@ -0,0 +1,45 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseFilterResourceData() *schema.ResourceData { + d := dataSourceFilter().TestResourceData() + meta := make([]map[string]interface{}, 0) + meta = append(meta, map[string]interface{}{ + "name": "test-filter-1", + "annotations": map[string]string{ + "tag": "unit-test", + }, + "labels": map[string]string{ + "label": "unit-test", + }, + }) + err := d.Set("name", "test-filter-1") + if err != nil { + return nil + } + err = d.Set("metadata", meta) + if err != nil { + return nil + } + return d +} + +func TestDataSourceFilterRead(t *testing.T) { + d := prepareBaseFilterResourceData() + ctx := context.Background() + diags := dataSourceFilterRead(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestDataSourceFilterNegativeRead(t *testing.T) { + d := prepareBaseFilterResourceData() + ctx := context.Background() + diags := dataSourceFilterRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "filter not found") +} diff --git a/spectrocloud/data_source_macros.go b/spectrocloud/data_source_macros.go deleted file mode 100644 index 0a91e3f9..00000000 --- a/spectrocloud/data_source_macros.go +++ /dev/null @@ -1,38 +0,0 @@ -package spectrocloud - -// import ( -// "context" - -// "github.com/hashicorp/terraform-plugin-sdk/v2/diag" -// "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -// "github.com/spectrocloud/palette-sdk-go/client" -// ) - -// func dataSourceMacros() *schema.Resource { -// return &schema.Resource{ -// ReadContext: 
dataSourceProjectRead, - -// Schema: map[string]*schema.Schema{ -// "project": { -// Type: schema.TypeString, -// Computed: true, -// Optional: true, -// }, -// }, -// } -// } - -// func dataSourceMacrosRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { -// c := m.(*client.V1Client) -// var diags diag.Diagnostics -// if v, ok := d.GetOk("project"); ok && v.(string) != "" { -// uid, err := c.GetProjectUID(v.(string)) -// if err != nil { -// return diag.FromErr(err) -// } -// d.SetId(uid) -// d.Set("name", v.(string)) -// } -// return diags -// } diff --git a/spectrocloud/data_source_pack_simple_test.go b/spectrocloud/data_source_pack_simple_test.go new file mode 100644 index 00000000..d903e6f0 --- /dev/null +++ b/spectrocloud/data_source_pack_simple_test.go @@ -0,0 +1,46 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseDataSourceSimplePackResourceData() *schema.ResourceData { + d := dataSourcePackSimple().TestResourceData() + _ = d.Set("name", "k8") + return d +} + +func TestDataSourceSimplePacksReadManifest(t *testing.T) { + d := prepareBaseDataSourceSimplePackResourceData() + _ = d.Set("type", "manifest") + diags := dataSourcePackReadSimple(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestDataSourceSimplePacksReadManifestWithoutReg(t *testing.T) { + d := prepareBaseDataSourceSimplePackResourceData() + _ = d.Set("type", "other") + diags := dataSourcePackReadSimple(context.Background(), d, unitTestMockAPIClient) + assertFirstDiagMessage(t, diags, "No registry uid provided.") +} + +func TestDataSourceSimplePacksRead(t *testing.T) { + d := prepareBaseDataSourceSimplePackResourceData() + _ = d.Set("type", "other") + _ = d.Set("registry_uid", "test-reg-uid") + _ = d.Set("version", "1.0") + diags := dataSourcePackReadSimple(context.Background(), d, 
unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestDataSourceSimplePacksReadNoPackFound(t *testing.T) { + d := prepareBaseDataSourceSimplePackResourceData() + _ = d.Set("type", "other") + _ = d.Set("registry_uid", "test-reg-uid") + _ = d.Set("version", "1.0") + diags := dataSourcePackReadSimple(context.Background(), d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "No values for pack found.") +} diff --git a/spectrocloud/data_source_pack_test.go b/spectrocloud/data_source_pack_test.go new file mode 100644 index 00000000..544615ae --- /dev/null +++ b/spectrocloud/data_source_pack_test.go @@ -0,0 +1,50 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseDataSourcePackResourceData() *schema.ResourceData { + d := dataSourcePack().TestResourceData() + d.SetId("test-pack-1") + _ = d.Set("type", "manifest") + return d +} + +func TestDataSourcePacksReadManifest(t *testing.T) { + d := prepareBaseDataSourcePackResourceData() + diags := dataSourcePackRead(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestDataSourcePacksReadOci(t *testing.T) { + d := prepareBaseDataSourcePackResourceData() + _ = d.Set("type", "oci") + _ = d.Set("registry_uid", "test-reg-uid") + diags := dataSourcePackRead(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestDataSourcePacksReadHelm(t *testing.T) { + d := prepareBaseDataSourcePackResourceData() + _ = d.Set("type", "helm") + _ = d.Set("name", "k8") + _ = d.Set("registry_uid", "test-reg-uid") + _ = d.Set("filters", "spec.cloudTypes=edge-nativeANDspec.layer=cniANDspec.displayName=CalicoANDspec.version>3.26.9ANDspec.registryUid=${data.spectrocloud_registry.palette_registry_oci.id}") + diags := dataSourcePackRead(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func 
TestDataSourcePacksReadHelmMultiPacks(t *testing.T) { + d := prepareBaseDataSourcePackResourceData() + _ = d.Set("type", "helm") + _ = d.Set("name", "k8") + _ = d.Set("registry_uid", "test-reg-uid") + _ = d.Set("filters", "spec.cloudTypes=edge-nativeANDspec.layer=cniANDspec.displayName=CalicoANDspec.version>3.26.9ANDspec.registryUid=${data.spectrocloud_registry.palette_registry_oci.id}") + diags := dataSourcePackRead(context.Background(), d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Multiple packs returned") + +} diff --git a/spectrocloud/data_source_pcg_ippool_test.go b/spectrocloud/data_source_pcg_ippool_test.go new file mode 100644 index 00000000..01708d15 --- /dev/null +++ b/spectrocloud/data_source_pcg_ippool_test.go @@ -0,0 +1,26 @@ +package spectrocloud + +import ( + "context" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceIpPoolRead(t *testing.T) { + resourceData := dataSourcePrivateCloudGatewayIpPool().TestResourceData() + _ = resourceData.Set("private_cloud_gateway_id", "test-pcg-id") + _ = resourceData.Set("name", "test-name") + + diags := dataSourceIpPoolRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-id", resourceData.Get("private_cloud_gateway_id").(string)) + assert.Equal(t, "test-name", resourceData.Get("name").(string)) +} + +func TestDataSourceIpPoolRead_MissingFields(t *testing.T) { + resourceData := dataSourcePrivateCloudGatewayIpPool().TestResourceData() + diags := dataSourceIpPoolRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.NotEmpty(t, diags) +} diff --git a/spectrocloud/data_source_private_cloud_gateway_test.go b/spectrocloud/data_source_private_cloud_gateway_test.go new file mode 100644 index 00000000..11d0a534 --- /dev/null +++ b/spectrocloud/data_source_private_cloud_gateway_test.go @@ -0,0 +1,25 @@ +package spectrocloud + +import ( + "context" + "github.com/stretchr/testify/assert" + 
"testing" +) + +func TestDataSourcePCGRead(t *testing.T) { + resourceData := dataSourcePCG().TestResourceData() + _ = resourceData.Set("name", "test-pcg-name") + diags := dataSourcePCGRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-name", resourceData.Get("name").(string)) + assert.NotEmpty(t, resourceData.Id()) +} + +func TestDataSourcePCGRead_MissingName(t *testing.T) { + resourceData := dataSourcePCG().TestResourceData() + + diags := dataSourcePCGRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Empty(t, diags) +} diff --git a/spectrocloud/data_source_project_test.go b/spectrocloud/data_source_project_test.go new file mode 100644 index 00000000..cc57188a --- /dev/null +++ b/spectrocloud/data_source_project_test.go @@ -0,0 +1,25 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceProjectRead(t *testing.T) { + + d := schema.TestResourceDataRaw(t, map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, map[string]interface{}{ + "name": "Default", + }) + + diags := dataSourceProjectRead(context.Background(), d, unitTestMockAPIClient) + + assert.Empty(t, diags) + assert.Equal(t, "Default", d.Get("name")) +} diff --git a/spectrocloud/data_source_registry_pack_test.go b/spectrocloud/data_source_registry_pack_test.go new file mode 100644 index 00000000..a4c28986 --- /dev/null +++ b/spectrocloud/data_source_registry_pack_test.go @@ -0,0 +1,40 @@ +package spectrocloud + +import ( + "context" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceRegistryPackRead(t *testing.T) { + resourceData := dataSourceRegistryPack().TestResourceData() + _ = resourceData.Set("name", "test-registry-name") + diags := dataSourceRegistryPackRead(context.Background(), resourceData, unitTestMockAPIClient) 
+ assert.Equal(t, "test-registry-name", resourceData.Get("name").(string)) + assert.Empty(t, diags) +} + +func TestDataSourceHelmRegistryPackRead(t *testing.T) { + resourceData := dataSourceRegistryHelm().TestResourceData() + _ = resourceData.Set("name", "Public") + diags := dataSourceRegistryHelmRead(context.Background(), resourceData, unitTestMockAPIClient) + assert.Equal(t, "Public", resourceData.Get("name").(string)) + assert.Empty(t, diags) +} + +func TestDataSourceOciRegistryPackRead(t *testing.T) { + resourceData := dataSourceRegistryOci().TestResourceData() + _ = resourceData.Set("name", "test-registry-oci") + diags := dataSourceRegistryOciRead(context.Background(), resourceData, unitTestMockAPIClient) + assert.Equal(t, "test-registry-oci"+ + "", resourceData.Get("name").(string)) + assert.Empty(t, diags) +} + +func TestDataSourceBasicRegistryPackRead(t *testing.T) { + resourceData := dataSourceRegistry().TestResourceData() + _ = resourceData.Set("name", "test-registry-name") + diags := dataSourceRegistryRead(context.Background(), resourceData, unitTestMockAPIClient) + assert.Equal(t, "test-registry-name", resourceData.Get("name").(string)) + assert.Empty(t, diags) +} diff --git a/spectrocloud/data_source_role_test.go b/spectrocloud/data_source_role_test.go new file mode 100644 index 00000000..3198535e --- /dev/null +++ b/spectrocloud/data_source_role_test.go @@ -0,0 +1,36 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseRoleResourceData() *schema.ResourceData { + d := dataSourceRole().TestResourceData() + err := d.Set("name", "test-role") + if err != nil { + return nil + } + return d +} + +func TestDataSourceRoleRead(t *testing.T) { + d := prepareBaseRoleResourceData() + diags := dataSourceRoleRead(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestDataSourceRoleErrorRead(t *testing.T) 
{ + d := prepareBaseRoleResourceData() + _ = d.Set("name", "test-role-invalid") + diags := dataSourceRoleRead(context.Background(), d, unitTestMockAPIClient) + assertFirstDiagMessage(t, diags, "role 'test-role-invalid' not found") +} + +func TestDataSourceRoleNegativeRead(t *testing.T) { + d := prepareBaseRoleResourceData() + diags := dataSourceRoleRead(context.Background(), d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "No roles are found") +} diff --git a/spectrocloud/data_source_user_test.go b/spectrocloud/data_source_user_test.go new file mode 100644 index 00000000..a578c299 --- /dev/null +++ b/spectrocloud/data_source_user_test.go @@ -0,0 +1,39 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseUserResourceData() *schema.ResourceData { + d := dataSourceUser().TestResourceData() + err := d.Set("email", "test@spectrocloud.com") + if err != nil { + return nil + } + return d +} + +func TestDataSourceUserRead(t *testing.T) { + // Initialize ResourceData with a test email + resourceData := prepareBaseUserResourceData() + + // Call the dataSourceUserRead function + diags := dataSourceUserRead(context.Background(), resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, "12345", resourceData.Id()) + assert.NoError(t, resourceData.Set("email", "test@spectrocloud.com")) + assert.Empty(t, diags) +} + +func TestDataSourceUserNegativeRead(t *testing.T) { + // Initialize ResourceData with a test email + resourceData := prepareBaseUserResourceData() + // Call the dataSourceUserRead function + diags := dataSourceUserRead(context.Background(), resourceData, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "User not found") + +} diff --git a/spectrocloud/data_source_workspace_test.go b/spectrocloud/data_source_workspace_test.go new file mode 100644 index 00000000..71810166 --- /dev/null +++ 
b/spectrocloud/data_source_workspace_test.go @@ -0,0 +1,26 @@ +package spectrocloud + +import ( + "context" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDataSourceWorkspaceRead(t *testing.T) { + + resourceData := dataSourceWorkspace().TestResourceData() + _ = resourceData.Set("name", "test-workspace") + diags := dataSourceWorkspaceRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "test-workspace", resourceData.Get("name").(string)) + assert.NotEmpty(t, resourceData.Id()) +} + +func TestDataSourceWorkspaceRead_MissingName(t *testing.T) { + resourceData := dataSourceWorkspace().TestResourceData() + + diags := dataSourceWorkspaceRead(context.Background(), resourceData, unitTestMockAPIClient) + + assert.Empty(t, diags) +} diff --git a/spectrocloud/data_volume_schema_test.go b/spectrocloud/data_volume_schema_test.go index 447232a2..429435af 100644 --- a/spectrocloud/data_volume_schema_test.go +++ b/spectrocloud/data_volume_schema_test.go @@ -89,68 +89,6 @@ func prepareDataVolumeTestData() *schema.ResourceData { return rd } -//func TestCreateDataVolume(t *testing.T) { -// rd := prepareDataVolumeTestData() -// -// m := &client.V1Client{} -// -// ctx := context.Background() -// resourceKubevirtDataVolumeCreate(ctx, rd, m) -//} -// -//func TestDeleteDataVolume(t *testing.T) { -// var diags diag.Diagnostics -// assert := assert.New(t) -// rd := prepareDataVolumeTestData() -// -// m := &client.V1Client{} -// -// ctx := context.Background() -// diags = resourceKubevirtDataVolumeDelete(ctx, rd, m) -// if diags.HasError() { -// assert.Error(errors.New("delete operation failed")) -// } else { -// assert.NoError(nil) -// } -//} -// -//func TestReadDataVolumeWithoutStatus(t *testing.T) { -// assert := assert.New(t) -// rd := prepareDataVolumeTestData() -// rd.SetId("project/cluster-123/default/vm-test/vol-test") -// m := &client.V1Client{} -// -// ctx := context.Background() -// diags := 
resourceKubevirtDataVolumeRead(ctx, rd, m) -// if diags.HasError() { -// assert.Error(errors.New("read operation failed")) -// } else { -// assert.NoError(nil) -// } -// -// // Read from metadata block -// metadata := rd.Get("metadata").([]interface{})[0].(map[string]interface{}) -// -// // Check that the resource data has been updated correctly -// assert.Equal("vol-test", metadata["name"]) -// assert.Equal("default", metadata["namespace"]) -//} -// -//func TestReadDataVolume(t *testing.T) { -// assert := assert.New(t) -// rd := prepareDataVolumeTestData() -// -// m := &client.V1Client{} -// -// ctx := context.Background() -// diags := resourceKubevirtDataVolumeRead(ctx, rd, m) -// if diags.HasError() { -// assert.Error(errors.New("read operation failed")) -// } else { -// assert.NoError(nil) -// } -//} - func TestExpandAddVolumeOptions(t *testing.T) { assert := assert.New(t) diff --git a/spectrocloud/kubevirt/schema/datavolume/data_volume_test.go b/spectrocloud/kubevirt/schema/datavolume/data_volume_test.go new file mode 100644 index 00000000..76e549c9 --- /dev/null +++ b/spectrocloud/kubevirt/schema/datavolume/data_volume_test.go @@ -0,0 +1,110 @@ +package datavolume + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "testing" +) + +func TestDataVolumeFields(t *testing.T) { + fields := DataVolumeFields() + assert.NotNil(t, fields) + assert.Contains(t, fields, "cluster_uid") + assert.Contains(t, fields, "cluster_context") + assert.Contains(t, fields, "vm_name") + assert.Contains(t, fields, "vm_namespace") + assert.Contains(t, fields, "add_volume_options") + assert.Contains(t, fields, "metadata") + assert.Contains(t, fields, "spec") + assert.Contains(t, fields, "status") +} + +func TestFromResourceData(t *testing.T) { + resourceData := schema.TestResourceDataRaw(t, 
DataVolumeFields(), map[string]interface{}{ + "metadata": []interface{}{ + map[string]interface{}{ + "name": "test-dv", + "namespace": "default", + }, + }, + "spec": []interface{}{ + map[string]interface{}{ + "source": map[string]interface{}{ + "blank": map[string]interface{}{}, + }, + }, + }, + }) + + dataVolume, err := FromResourceData(resourceData) + assert.NoError(t, err) + assert.NotNil(t, dataVolume) + assert.Equal(t, "test-dv", dataVolume.Name) + assert.Equal(t, "default", dataVolume.Namespace) +} + +func TestToResourceData(t *testing.T) { + dv := cdiv1.DataVolume{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: cdiv1.DataVolumeSpec{ + PriorityClassName: "test-pirioirty", + ContentType: "bil", + Checkpoints: nil, + FinalCheckpoint: false, + Preallocation: nil, + }, + Status: cdiv1.DataVolumeStatus{}, + } + + resourceData := schema.TestResourceDataRaw(t, DataVolumeFields(), map[string]interface{}{}) + err := ToResourceData(dv, resourceData) + assert.NoError(t, err) +} + +func TestExpandDataVolumeStatus(t *testing.T) { + tests := []struct { + input []interface{} + expected cdiv1.DataVolumeStatus + }{ + { + input: []interface{}{ + map[string]interface{}{ + "phase": "Succeeded", + "progress": "50%", + }, + }, + expected: cdiv1.DataVolumeStatus{ + Phase: cdiv1.DataVolumePhase("Succeeded"), + Progress: cdiv1.DataVolumeProgress("50%"), + }, + }, + { + input: []interface{}{nil}, + expected: cdiv1.DataVolumeStatus{}, + }, + { + input: []interface{}{}, + expected: cdiv1.DataVolumeStatus{}, + }, + { + input: []interface{}{ + map[string]interface{}{ + "phase": "Failed", + }, + }, + expected: cdiv1.DataVolumeStatus{ + Phase: cdiv1.DataVolumePhase("Failed"), + }, + }, + } + + for _, tt := range tests { + t.Run("", func(t *testing.T) { + result := expandDataVolumeStatus(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/spectrocloud/kubevirt/schema/k8s/k8s_utils_test.go b/spectrocloud/kubevirt/schema/k8s/k8s_utils_test.go 
new file mode 100644 index 00000000..2d67c32b --- /dev/null +++ b/spectrocloud/kubevirt/schema/k8s/k8s_utils_test.go @@ -0,0 +1,223 @@ +package k8s + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/kubevirt/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "testing" +) + +func TestNamespacedMetadataSchemaIsTemplate(t *testing.T) { + tests := []struct { + objectName string + generatableName bool + isTemplate bool + expectedFields map[string]*schema.Schema + }{ + { + objectName: "pod", + generatableName: true, + isTemplate: false, + expectedFields: map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Description: "Namespace defines the space within which name of the pod must be unique.", + Optional: true, + ForceNew: true, + Default: "default", + }, + "generate_name": { + Type: schema.TypeString, + Description: "Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. 
Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#idempotency", + Optional: true, + ValidateFunc: utils.ValidateGenerateName, + ConflictsWith: []string{"metadata.name"}, + }, + "name": { + Type: schema.TypeString, + Description: "Name of the pod.", + Optional: true, + ForceNew: true, + }, + }, + }, + { + objectName: "service", + generatableName: false, + isTemplate: true, + expectedFields: map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Description: "Namespace defines the space within which name of the service must be unique.", + Optional: true, + ForceNew: true, + Default: nil, + }, + "generate_name": nil, + "name": { + Type: schema.TypeString, + Description: "Name of the service.", + Optional: true, + ForceNew: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.objectName, func(t *testing.T) { + _ = namespacedMetadataSchemaIsTemplate(tt.objectName, tt.generatableName, tt.isTemplate) + }) + } +} + +func TestConvertToBasicMetadata(t *testing.T) { + tests := []struct { + name string + resourceData *schema.ResourceData + expectedMeta metav1.ObjectMeta + }{ + { + name: "complete metadata", + resourceData: &schema.ResourceData{ + // Initialize the ResourceData with the necessary values + // For example, use a mock or set values directly + // Assuming `schema.ResourceData` has methods like `Set`, `GetOk` etc. 
+ }, + expectedMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "key1": "value1", + }, + Labels: map[string]string{ + "label1": "value1", + }, + GenerateName: "gen-name", + Name: "name", + Namespace: "namespace", + ResourceVersion: "resource-version", + }, + }, + { + name: "partial metadata", + resourceData: &schema.ResourceData{ + // Initialize the ResourceData with only some values + }, + expectedMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "key1": "value1", + }, + Labels: map[string]string{ + "label1": "value1", + }, + Name: "name", + }, + }, + { + name: "empty metadata", + resourceData: &schema.ResourceData{ + // Initialize the ResourceData with no values + }, + expectedMeta: metav1.ObjectMeta{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _ = ConvertToBasicMetadata(tt.resourceData) + }) + } +} + +func TestExpandLabelSelectorRequirement(t *testing.T) { + tests := []struct { + name string + input []interface{} + expected []metav1.LabelSelectorRequirement + }{ + { + name: "valid requirements", + input: []interface{}{ + map[string]interface{}{ + "key": "key1", + "operator": "In", + "values": schema.NewSet(schema.HashString, []interface{}{"value1", "value2"}), + }, + map[string]interface{}{ + "key": "key2", + "operator": "NotIn", + "values": schema.NewSet(schema.HashString, []interface{}{"value3"}), + }, + }, + expected: []metav1.LabelSelectorRequirement{ + { + Key: "key1", + Operator: metav1.LabelSelectorOperator("In"), + Values: []string{"value1", "value2"}, + }, + { + Key: "key2", + Operator: metav1.LabelSelectorOperator("NotIn"), + Values: []string{"value3"}, + }, + }, + }, + { + name: "empty input", + input: []interface{}{}, + expected: []metav1.LabelSelectorRequirement{}, + }, + { + name: "nil input", + input: []interface{}{ + nil, + }, + expected: []metav1.LabelSelectorRequirement{}, + }, + { + name: "invalid input", + input: []interface{}{ + map[string]interface{}{ + "key": "key1", + 
"operator": "InvalidOperator", + "values": schema.NewSet(schema.HashString, []interface{}{"value1"}), + }, + }, + expected: []metav1.LabelSelectorRequirement{ + { + Key: "key1", + Operator: metav1.LabelSelectorOperator("InvalidOperator"), + Values: []string{"value1"}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := expandLabelSelectorRequirement(tt.input) + assert.ElementsMatch(t, tt.expected, result) + }) + } +} + +func TestFlattenLabelSelectorRequirement(t *testing.T) { + tests := []struct { + name string + input []metav1.LabelSelectorRequirement + expected []interface{} + }{ + { + name: "empty input", + input: []metav1.LabelSelectorRequirement{}, + expected: []interface{}{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := flattenLabelSelectorRequirement(tt.input) + assert.ElementsMatch(t, tt.expected, result) + }) + } +} diff --git a/spectrocloud/kubevirt/schema/virtualmachine/virtualmachines_test.go b/spectrocloud/kubevirt/schema/virtualmachine/virtualmachines_test.go new file mode 100644 index 00000000..5421c8a2 --- /dev/null +++ b/spectrocloud/kubevirt/schema/virtualmachine/virtualmachines_test.go @@ -0,0 +1,329 @@ +package virtualmachine + +import ( + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/kubevirt/utils" + "github.com/stretchr/testify/assert" + k8sv1 "k8s.io/api/core/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + kubevirtapiv1 "kubevirt.io/api/core/v1" + "testing" +) + +func TestExpandVirtualMachineConditions(t *testing.T) { + tests := []struct { + name string + input []interface{} + expected []kubevirtapiv1.VirtualMachineCondition + wantErr bool + }{ + { + name: "valid input", + input: []interface{}{ + map[string]interface{}{ + "type": "Ready", + "status": "True", + "reason": "Initialized", + "message": "VM is ready", + }, + }, + expected: []kubevirtapiv1.VirtualMachineCondition{ + { + Type: 
kubevirtapiv1.VirtualMachineConditionType("Ready"), + Status: k8sv1.ConditionStatus("True"), + Reason: "Initialized", + Message: "VM is ready", + }, + }, + wantErr: false, + }, + { + name: "empty input", + input: []interface{}{}, + expected: []kubevirtapiv1.VirtualMachineCondition{}, + wantErr: false, + }, + { + name: "invalid input", + input: []interface{}{ + map[string]interface{}{ + "type": "InvalidType", + "status": "InvalidStatus", + }, + }, + expected: []kubevirtapiv1.VirtualMachineCondition{ + { + Type: kubevirtapiv1.VirtualMachineConditionType("InvalidType"), + Status: k8sv1.ConditionStatus("InvalidStatus"), + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := expandVirtualMachineConditions(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("expandVirtualMachineConditions() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFlattenVirtualMachineConditions(t *testing.T) { + tests := []struct { + name string + input []kubevirtapiv1.VirtualMachineCondition + expected []interface{} + }{ + { + name: "valid input", + input: []kubevirtapiv1.VirtualMachineCondition{ + { + Type: kubevirtapiv1.VirtualMachineConditionType("Ready"), + Status: k8sv1.ConditionStatus("True"), + Reason: "Initialized", + Message: "VM is ready", + }, + }, + expected: []interface{}{ + map[string]interface{}{ + "type": "Ready", + "status": "True", + "reason": "Initialized", + "message": "VM is ready", + }, + }, + }, + { + name: "empty input", + input: []kubevirtapiv1.VirtualMachineCondition{}, + expected: []interface{}{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := flattenVirtualMachineConditions(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestExpandVirtualMachineStateChangeRequests(t *testing.T) { + testUID := k8stypes.UID("1234") + tests := []struct { + name string + input 
[]interface{} + expected []kubevirtapiv1.VirtualMachineStateChangeRequest + }{ + { + name: "valid input", + input: []interface{}{ + map[string]interface{}{ + "action": "Start", + "data": map[string]interface{}{ + "key1": "value1", + }, + "uid": "1234", + }, + }, + expected: []kubevirtapiv1.VirtualMachineStateChangeRequest{ + { + Action: kubevirtapiv1.StateChangeRequestAction("Start"), + Data: utils.ExpandStringMap(map[string]interface{}{"key1": "value1"}), + UID: &testUID, + }, + }, + }, + { + name: "empty input", + input: []interface{}{}, + expected: []kubevirtapiv1.VirtualMachineStateChangeRequest{}, + }, + { + name: "partial input", + input: []interface{}{ + map[string]interface{}{ + "action": "Stop", + }, + }, + expected: []kubevirtapiv1.VirtualMachineStateChangeRequest{ + { + Action: kubevirtapiv1.StateChangeRequestAction("Stop"), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := expandVirtualMachineStateChangeRequests(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFlattenVirtualMachineStateChangeRequests(t *testing.T) { + tests := []struct { + name string + input []kubevirtapiv1.VirtualMachineStateChangeRequest + expected []interface{} + }{ + { + name: "empty input", + input: []kubevirtapiv1.VirtualMachineStateChangeRequest{}, + expected: []interface{}{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := flattenVirtualMachineStateChangeRequests(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestExpandVirtualMachineStatus(t *testing.T) { + tests := []struct { + name string + input []interface{} + expected kubevirtapiv1.VirtualMachineStatus + }{ + { + name: "full input", + input: []interface{}{ + map[string]interface{}{ + "created": true, + "ready": false, + "conditions": []interface{}{ + map[string]interface{}{ + "type": "Ready", + "status": "True", + }, + }, + "state_change_requests": []interface{}{ + 
map[string]interface{}{ + "action": "Start", + "data": map[string]interface{}{ + "key1": "value1", + }, + "uid": "1234", + }, + }, + }, + }, + expected: kubevirtapiv1.VirtualMachineStatus{ + Created: true, + Ready: false, + Conditions: []kubevirtapiv1.VirtualMachineCondition{ + { + Type: kubevirtapiv1.VirtualMachineConditionType("Ready"), + Status: k8sv1.ConditionStatus("True"), + }, + }, + StateChangeRequests: []kubevirtapiv1.VirtualMachineStateChangeRequest{ + { + Action: kubevirtapiv1.StateChangeRequestAction("Start"), + Data: utils.ExpandStringMap(map[string]interface{}{"key1": "value1"}), + }, + }, + }, + }, + { + name: "empty input", + input: []interface{}{}, + expected: kubevirtapiv1.VirtualMachineStatus{}, + }, + { + name: "partial input", + input: []interface{}{ + map[string]interface{}{ + "created": true, + }, + }, + expected: kubevirtapiv1.VirtualMachineStatus{ + Created: true, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := expandVirtualMachineStatus(tt.input) + assert.NoError(t, err) + + }) + } +} + +func TestFlattenVirtualMachineStatus(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.VirtualMachineStatus + expected []interface{} + }{ + { + name: "full input", + input: kubevirtapiv1.VirtualMachineStatus{ + Created: true, + Ready: false, + Conditions: []kubevirtapiv1.VirtualMachineCondition{ + { + Type: kubevirtapiv1.VirtualMachineConditionType("Ready"), + Status: k8sv1.ConditionStatus("True"), + }, + }, + StateChangeRequests: []kubevirtapiv1.VirtualMachineStateChangeRequest{ + { + Action: kubevirtapiv1.StateChangeRequestAction("Start"), + Data: utils.ExpandStringMap(map[string]interface{}{"key1": "value1"}), + }, + }, + }, + expected: []interface{}{ + map[string]interface{}{ + "created": true, + "ready": false, + "conditions": []interface{}{ + map[string]interface{}{ + "type": "Ready", + "status": "True", + }, + }, + "state_change_requests": []interface{}{ + map[string]interface{}{ 
+ "action": "Start", + "data": map[string]interface{}{ + "key1": "value1", + }, + "uid": "1234", + }, + }, + }, + }, + }, + { + name: "empty input", + input: kubevirtapiv1.VirtualMachineStatus{}, + expected: []interface{}{ + map[string]interface{}{ + "created": false, + "ready": false, + "conditions": []interface{}{}, + "state_change_requests": []interface{}{}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + flattenVirtualMachineStatus(tt.input) + + }) + } +} diff --git a/spectrocloud/kubevirt/schema/virtualmachineinstance/virtualmachineinstance_test.go b/spectrocloud/kubevirt/schema/virtualmachineinstance/virtualmachineinstance_test.go new file mode 100644 index 00000000..713b1fac --- /dev/null +++ b/spectrocloud/kubevirt/schema/virtualmachineinstance/virtualmachineinstance_test.go @@ -0,0 +1,252 @@ +package virtualmachineinstance + +import ( + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/resource" + kubevirtapiv1 "kubevirt.io/api/core/v1" + "testing" +) + +func TestExpandProbe(t *testing.T) { + tests := []struct { + name string + input []interface{} + expected *kubevirtapiv1.Probe + }{ + { + name: "Empty input", + input: []interface{}{}, + expected: nil, + }, + { + name: "Nil input", + input: []interface{}{ + nil, + }, + expected: nil, + }, + { + name: "Valid input", + input: []interface{}{ + map[string]interface{}{ + // Add key-value pairs for your Probe fields here + }, + }, + expected: &kubevirtapiv1.Probe{ + // Fill in the expected Probe fields + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := expandProbe(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFlattenProbe(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.Probe + expected []interface{} + }{ + { + name: "Empty input", + input: kubevirtapiv1.Probe{}, + expected: []interface{}{map[string]interface{}{}}, + }, + { + name: "Valid input", + input: 
kubevirtapiv1.Probe{ + // Fill in the Probe fields + }, + expected: []interface{}{ + map[string]interface{}{ + // Add key-value pairs for your Probe fields here + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := flattenProbe(tt.input) + assert.ElementsMatch(t, tt.expected, result) + }) + } +} + +func TestFlattenContainerDisk(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.ContainerDiskSource + expected []interface{} + }{ + { + name: "Empty input", + input: kubevirtapiv1.ContainerDiskSource{ + Image: "", + }, + expected: []interface{}{ + map[string]interface{}{ + "image_url": "", + }, + }, + }, + { + name: "Valid input", + input: kubevirtapiv1.ContainerDiskSource{ + Image: "registry.example.com/my-image", + }, + expected: []interface{}{ + map[string]interface{}{ + "image_url": "registry.example.com/my-image", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := flattenContainerDisk(tt.input) + assert.ElementsMatch(t, tt.expected, result) + }) + } +} + +func TestFlattenCloudInitNoCloud(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.CloudInitNoCloudSource + expected []interface{} + }{ + { + name: "Empty input", + input: kubevirtapiv1.CloudInitNoCloudSource{ + UserData: "", + }, + expected: []interface{}{ + map[string]interface{}{ + "user_data": "", + }, + }, + }, + { + name: "Valid input", + input: kubevirtapiv1.CloudInitNoCloudSource{ + UserData: "user-data-content", + }, + expected: []interface{}{ + map[string]interface{}{ + "user_data": "user-data-content", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := flattenCloudInitNoCloud(tt.input) + assert.ElementsMatch(t, tt.expected, result) + }) + } +} + +func TestFlattenEphemeral(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.EphemeralVolumeSource + expected []interface{} + }{ + { + name: "Empty 
input", + input: kubevirtapiv1.EphemeralVolumeSource{ + PersistentVolumeClaim: nil, + }, + expected: []interface{}{ + map[string]interface{}{ + "persistent_volume_claim": nil, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + flattenEphemeral(tt.input) + + }) + } +} + +func TestFlattenEmptyDisk(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.EmptyDiskSource + expected []interface{} + }{ + { + name: "Empty input", + input: kubevirtapiv1.EmptyDiskSource{ + Capacity: resource.Quantity{}, + }, + expected: []interface{}{ + map[string]interface{}{ + "capacity": "", + }, + }, + }, + { + name: "Valid input", + input: kubevirtapiv1.EmptyDiskSource{ + Capacity: resource.Quantity{}, + }, + expected: []interface{}{ + map[string]interface{}{ + "capacity": "10Gi", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + flattenEmptyDisk(tt.input) + }) + } +} + +func TestFlattenConfigMap(t *testing.T) { + tests := []struct { + name string + input kubevirtapiv1.ConfigMapVolumeSource + expected []interface{} + }{ + { + name: "Empty input", + input: kubevirtapiv1.ConfigMapVolumeSource{}, + expected: []interface{}{ + map[string]interface{}{ + "name": "", + }, + }, + }, + { + name: "Valid input", + input: kubevirtapiv1.ConfigMapVolumeSource{}, + expected: []interface{}{ + map[string]interface{}{ + "name": "my-config-map", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + flattenConfigMap(tt.input) + }) + } +} diff --git a/spectrocloud/kubevirt/test_utils/test_utils_test.go b/spectrocloud/kubevirt/test_utils/test_utils_test.go new file mode 100644 index 00000000..edded2df --- /dev/null +++ b/spectrocloud/kubevirt/test_utils/test_utils_test.go @@ -0,0 +1,53 @@ +package test_utils + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestGetPVCRequirements(t *testing.T) { + tests := []struct { + name string + dataVolume interface{} 
+ expected interface{} + }{ + { + name: "Valid input", + dataVolume: map[string]interface{}{ + "spec": []interface{}{ + map[string]interface{}{ + "pvc": []interface{}{ + map[string]interface{}{ + "resources": []interface{}{ + map[string]interface{}{ + "requests": map[string]interface{}{ + "storage": "10Gi", + }, + "limits": map[string]interface{}{ + "storage": "20Gi", + }, + }, + }, + }, + }, + }, + }, + }, + expected: map[string]interface{}{ + "requests": map[string]interface{}{ + "storage": "10Gi", + }, + "limits": map[string]interface{}{ + "storage": "20Gi", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetPVCRequirements(tt.dataVolume) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/spectrocloud/kubevirt/utils/utils_test.go b/spectrocloud/kubevirt/utils/utils_test.go new file mode 100644 index 00000000..b1113c13 --- /dev/null +++ b/spectrocloud/kubevirt/utils/utils_test.go @@ -0,0 +1,327 @@ +package utils + +import ( + "encoding/base64" + "fmt" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "testing" +) + +func TestValidateAnnotations(t *testing.T) { + tests := []struct { + name string + value interface{} + key string + expected []error + }{ + { + name: "Valid annotations", + value: map[string]interface{}{ + "valid.annotation/key": "value", + "another.valid/key": "value", + }, + key: "annotations", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, es := ValidateAnnotations(tt.value, tt.key) + assert.Equal(t, tt.expected, es) + }) + } +} + +func TestValidateName(t *testing.T) { + tests := []struct { + name string + value interface{} + key string + expected []error + }{ + { + name: "Valid name", + value: "valid-name", + key: "name", + 
expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, es := ValidateName(tt.value, tt.key) + assert.Equal(t, tt.expected, es) + }) + } +} + +func TestValidateGenerateName(t *testing.T) { + tests := []struct { + name string + value interface{} + key string + expected []error + }{ + { + name: "Valid generate name", + value: "valid-name", + key: "generate_name", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, es := ValidateGenerateName(tt.value, tt.key) + assert.Equal(t, tt.expected, es) + }) + } +} + +func TestValidateLabels(t *testing.T) { + tests := []struct { + name string + value interface{} + key string + expected []error + }{ + { + name: "Valid labels", + value: map[string]interface{}{ + "valid.label/key": "valid-value", + }, + key: "labels", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, es := ValidateLabels(tt.value, tt.key) + assert.Equal(t, tt.expected, es) + }) + } +} + +func TestValidateTypeStringNullableInt(t *testing.T) { + tests := []struct { + name string + value interface{} + key string + expected []error + }{ + { + name: "Valid integer", + value: "123", + key: "nullable_int", + expected: nil, + }, + { + name: "Empty string", + value: "", + key: "nullable_int", + expected: nil, + }, + { + name: "Invalid string", + value: "abc", + key: "nullable_int", + expected: []error{ + fmt.Errorf("nullable_int: cannot parse 'abc' as int: strconv.ParseInt: parsing \"abc\": invalid syntax"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, es := ValidateTypeStringNullableInt(tt.value, tt.key) + assert.Equal(t, tt.expected, es) + }) + } +} + +func TestStringIsIntInRange(t *testing.T) { + tests := []struct { + name string + value interface{} + min int + max int + expected diag.Diagnostics + }{ + { + name: "Valid integer within range", + value: "5", + min: 1, + max: 10, + expected: 
nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diagFunc := StringIsIntInRange(tt.min, tt.max) + result := diagFunc(tt.value, cty.Path{}) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIdParts(t *testing.T) { + tests := []struct { + id string + expected [4]string + hasError bool + }{ + {"scope/uid/ns/name", [4]string{"scope", "uid", "ns", "name"}, false}, + {"invalid/id/format", [4]string{"", "", "", ""}, true}, + } + + for _, test := range tests { + scope, uid, ns, name, err := IdParts(test.id) + if test.hasError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, [4]string{scope, uid, ns, name}) + } + } +} + +func TestIdPartsDV(t *testing.T) { + tests := []struct { + id string + expected [5]string + hasError bool + }{ + {"scope/uid/ns/name/dv", [5]string{"scope", "uid", "ns", "name", "dv"}, false}, + {"invalid/id/format", [5]string{"", "", "", "", ""}, true}, + } + + for _, test := range tests { + scope, uid, ns, name, dv, err := IdPartsDV(test.id) + if test.hasError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, [5]string{scope, uid, ns, name, dv}) + } + } +} + +func TestFlattenStringMap(t *testing.T) { + m := map[string]string{"key": "value"} + result := FlattenStringMap(m) + assert.Equal(t, map[string]interface{}{"key": "value"}, result) +} + +func TestExpandStringMap(t *testing.T) { + m := map[string]interface{}{"key": "value"} + result := ExpandStringMap(m) + assert.Equal(t, map[string]string{"key": "value"}, result) +} + +func TestExpandBase64MapToByteMap(t *testing.T) { + m := map[string]interface{}{"key": base64.StdEncoding.EncodeToString([]byte("value"))} + result := ExpandBase64MapToByteMap(m) + assert.Equal(t, map[string][]byte{"key": []byte("value")}, result) +} + +func TestExpandStringMapToByteMap(t *testing.T) { + m := map[string]interface{}{"key": "value"} + result := ExpandStringMapToByteMap(m) + 
assert.Equal(t, map[string][]byte{"key": []byte("value")}, result) +} + +func TestExpandStringSlice(t *testing.T) { + s := []interface{}{"a", "b", "c"} + result := ExpandStringSlice(s) + assert.Equal(t, []string{"a", "b", "c"}, result) +} + +func TestFlattenByteMapToBase64Map(t *testing.T) { + m := map[string][]byte{"key": []byte("value")} + result := FlattenByteMapToBase64Map(m) + assert.Equal(t, map[string]string{"key": base64.StdEncoding.EncodeToString([]byte("value"))}, result) +} + +func TestFlattenByteMapToStringMap(t *testing.T) { + m := map[string][]byte{"key": []byte("value")} + result := FlattenByteMapToStringMap(m) + assert.Equal(t, map[string]string{"key": "value"}, result) +} + +func TestPtrToString(t *testing.T) { + s := "value" + ptr := PtrToString(s) + assert.Equal(t, &s, ptr) +} + +func TestPtrToBool(t *testing.T) { + b := true + ptr := PtrToBool(b) + assert.Equal(t, &b, ptr) +} + +func TestPtrToInt32(t *testing.T) { + i := int32(10) + ptr := PtrToInt32(i) + assert.Equal(t, &i, ptr) +} + +func TestPtrToInt64(t *testing.T) { + i := int64(10) + ptr := PtrToInt64(i) + assert.Equal(t, &i, ptr) +} + +func TestSliceOfString(t *testing.T) { + s := []interface{}{"a", "b", "c"} + result := SliceOfString(s) + assert.Equal(t, []string{"a", "b", "c"}, result) +} + +func TestBase64EncodeStringMap(t *testing.T) { + m := map[string]interface{}{"key": "value"} + result := Base64EncodeStringMap(m) + assert.Equal(t, map[string]interface{}{"key": base64.StdEncoding.EncodeToString([]byte("value"))}, result) +} + +func TestNewInt64Set(t *testing.T) { + in := []int64{3, 1, 2} + set := NewInt64Set(schema.HashInt, in) + assert.Equal(t, 3, len(set.List())) +} + +func TestSchemaSetToStringArray(t *testing.T) { + set := schema.NewSet(schema.HashString, []interface{}{"a", "b", "c"}) + result := SchemaSetToStringArray(set) + assert.Equal(t, 3, len(result)) +} + +func TestExpandMapToResourceList(t *testing.T) { + m := map[string]interface{}{"cpu": "100m", "memory": "200Mi"} + 
rl, err := ExpandMapToResourceList(m) + assert.NoError(t, err) + assert.Equal(t, api.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("200Mi"), + }, *rl) +} + +func TestFlattenResourceList(t *testing.T) { + rl := api.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("200Mi"), + } + m := FlattenResourceList(rl) + assert.Equal(t, map[string]string{ + "cpu": "100m", + "memory": "200Mi", + }, m) +} diff --git a/spectrocloud/provider_test.go b/spectrocloud/provider_test.go index c66850b1..a715cd64 100644 --- a/spectrocloud/provider_test.go +++ b/spectrocloud/provider_test.go @@ -1,6 +1,12 @@ package spectrocloud -import "testing" +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/stretchr/testify/assert" + "testing" +) func TestProvider(t *testing.T) { p := New("111.111.111")() // test version @@ -11,3 +17,71 @@ func TestProvider(t *testing.T) { t.Fatal(err) } } + +func prepareBaseProviderConfig() *schema.ResourceData { + basSchema := &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Optional: true, + Description: "The Spectro Cloud API host url. Can also be set with the `SPECTROCLOUD_HOST` environment variable. Defaults to https://api.spectrocloud.com", + DefaultFunc: schema.EnvDefaultFunc("SPECTROCLOUD_HOST", "api.spectrocloud.com"), + }, + "api_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "The Spectro Cloud API key. Can also be set with the `SPECTROCLOUD_APIKEY` environment variable.", + DefaultFunc: schema.EnvDefaultFunc("SPECTROCLOUD_APIKEY", nil), + }, + "trace": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable HTTP request tracing. Can also be set with the `SPECTROCLOUD_TRACE` environment variable. To enable Terraform debug logging, set `TF_LOG=DEBUG`. 
Visit the Terraform documentation to learn more about Terraform [debugging](https://developer.hashicorp.com/terraform/plugin/log/managing).", + DefaultFunc: schema.EnvDefaultFunc("SPECTROCLOUD_TRACE", nil), + }, + "retry_attempts": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of retry attempts. Can also be set with the `SPECTROCLOUD_RETRY_ATTEMPTS` environment variable. Defaults to 10.", + DefaultFunc: schema.EnvDefaultFunc("SPECTROCLOUD_RETRY_ATTEMPTS", 10), + }, + "project_name": { + Type: schema.TypeString, + Optional: true, + Default: "Default", + // cannot be empty + ValidateFunc: validation.StringIsNotEmpty, + Description: "The Palette project the provider will target. If no value is provided, the `Default` Palette project is used. The default value is `Default`.", + }, + "ignore_insecure_tls_error": { + Type: schema.TypeBool, + Optional: true, + Description: "Ignore insecure TLS errors for Spectro Cloud API endpoints. Defaults to false.", + }, + }, + } + + d := basSchema.TestResourceData() + _ = d.Set("host", "127.0.0.1:8080") + _ = d.Set("project_name", "Default") + _ = d.Set("ignore_insecure_tls_error", true) + _ = d.Set("api_key", "12345") + _ = d.Set("trace", true) + _ = d.Set("retry_attempts", 2) + return d +} + +func TestProviderConfig(t *testing.T) { + d := prepareBaseProviderConfig() + _, diags := providerConfigure(context.Background(), d) + assert.Empty(t, diags) +} + +func TestProviderConfigValidError(t *testing.T) { + d := prepareBaseProviderConfig() + // validating empty api key use case + _ = d.Set("api_key", "") + _, diags := providerConfigure(context.Background(), d) + assertFirstDiagMessage(t, diags, "Unable to create Spectro Cloud client") +} diff --git a/spectrocloud/resource_alert_test.go b/spectrocloud/resource_alert_test.go index aeb598f3..dff631dd 100644 --- a/spectrocloud/resource_alert_test.go +++ b/spectrocloud/resource_alert_test.go @@ -1,6 +1,8 @@ package spectrocloud import ( + "context" + 
"github.com/stretchr/testify/assert" "reflect" "testing" @@ -134,12 +136,14 @@ func TestToAlertHttpEmail(t *testing.T) { func prepareAlertTestData() *schema.ResourceData { rd := resourceAlert().TestResourceData() - rd.Set("type", "email") - rd.Set("is_active", true) - rd.Set("alert_all_users", false) - rd.Set("project", "Default") + rd.SetId("test-alert-id") + _ = rd.Set("type", "email") + _ = rd.Set("is_active", true) + _ = rd.Set("alert_all_users", false) + _ = rd.Set("project", "Default") + _ = rd.Set("component", "ClusterHealth") emails := []string{"testuser1@spectrocloud.com", "testuser2@spectrocloud.com"} - rd.Set("identifiers", emails) + _ = rd.Set("identifiers", emails) var http []map[string]interface{} hookConfig := map[string]interface{}{ "method": "POST", @@ -151,160 +155,34 @@ func prepareAlertTestData() *schema.ResourceData { }, } http = append(http, hookConfig) - rd.Set("http", http) + _ = rd.Set("http", http) return rd } -//func TestGetProjectIDError(t *testing.T) { -// assert := assert.New(t) -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// pjtUid, err := getProjectID(rd, m) -// if err == nil { -// assert.Error(errors.New("unexpected Error")) -// } -// assert.Equal(err.Error(), "unable to read project uid") -// assert.Equal("", pjtUid) -//} - -//func TestResourceAlertCreate(t *testing.T) { -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertCreate(ctx, rd, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} - -//func TestResourceAlertCreateProjectUIDError(t *testing.T) { -// assert := assert.New(t) -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertCreate(ctx, rd, m) -// assert.Equal(diags[0].Summary, "unable to read project uid") -//} - -//func TestResourceAlertCreateAlertUIDError(t *testing.T) { -// assert := assert.New(t) -// rd := prepareAlertTestData() -// m 
:= &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertCreate(ctx, rd, m) -// assert.Equal(diags[0].Summary, "alert creation failed") -//} - -//func TestResourceAlertUpdate(t *testing.T) { -// -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertUpdate(ctx, rd, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} - -//func TestResourceAlertUpdateError(t *testing.T) { -// assert := assert.New(t) -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertUpdate(ctx, rd, m) -// assert.Equal(diags[0].Summary, "alert update failed") -//} - -//func TestResourceAlertDelete(t *testing.T) { -// -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertDelete(ctx, rd, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} - -//func TestResourceAlertDeleteProjectUIDError(t *testing.T) { -// assert := assert.New(t) -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertDelete(ctx, rd, m) -// assert.Equal(diags[0].Summary, "unable to read project uid") -//} - -//func TestResourceAlertDeleteError(t *testing.T) { -// assert := assert.New(t) -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertDelete(ctx, rd, m) -// assert.Equal(diags[0].Summary, "unable to delete alert") -//} - -//func TestResourceAlertReadAlertNil(t *testing.T) { -// rd := prepareAlertTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertRead(ctx, rd, m) -// -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} +func TestResourceAlertCreate(t *testing.T) { + rd := prepareAlertTestData() + ctx := context.Background() + diags := 
resourceAlertCreate(ctx, rd, unitTestMockAPIClient) + assert.Empty(t, diags) +} -//func TestResourceAlertReadAlertEmail(t *testing.T) { -// rd := resourceAlert().TestResourceData() -// rd.Set("type", "email") -// rd.Set("is_active", true) -// rd.Set("alert_all_users", false) -// rd.Set("project", "Default") -// emails := []string{"testuser1@spectrocloud.com", "testuser2@spectrocloud.com"} -// rd.Set("identifiers", emails) -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertRead(ctx, rd, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} +func TestResourceAlertRead(t *testing.T) { + rd := prepareAlertTestData() + ctx := context.Background() + diags := resourceAlertRead(ctx, rd, unitTestMockAPIClient) + assert.Empty(t, diags) +} -//func TestResourceAlertReadAlertHttp(t *testing.T) { -// rd := resourceAlert().TestResourceData() -// rd.Set("type", "http") -// rd.Set("is_active", true) -// rd.Set("alert_all_users", false) -// rd.Set("project", "Default") -// emails := []string{"testuser1@spectrocloud.com", "testuser2@spectrocloud.com"} -// rd.Set("identifiers", emails) -// var http []map[string]interface{} -// hookConfig := map[string]interface{}{ -// "method": "POST", -// "url": "https://www.openhook.com/spc/notify", -// "body": "{ \"text\": \"{{message}}\" }", -// "headers": map[string]interface{}{ -// "tag": "Health", -// "source": "spectrocloud", -// }, -// } -// http = append(http, hookConfig) -// rd.Set("http", http) -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertRead(ctx, rd, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} +func TestResourceAlertUpdate(t *testing.T) { + rd := prepareAlertTestData() + ctx := context.Background() + diags := resourceAlertUpdate(ctx, rd, unitTestMockAPIClient) + assert.Empty(t, diags) +} -//func TestResourceAlertReadNegative(t *testing.T) { -// rd := resourceAlert().TestResourceData() 
-// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceAlertRead(ctx, rd, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} +func TestResourceAlertDelete(t *testing.T) { + rd := prepareAlertTestData() + ctx := context.Background() + diags := resourceAlertDelete(ctx, rd, unitTestMockAPIClient) + assert.Empty(t, diags) +} diff --git a/spectrocloud/resource_appliance_test.go b/spectrocloud/resource_appliance_test.go index c01b9ea7..794109fe 100644 --- a/spectrocloud/resource_appliance_test.go +++ b/spectrocloud/resource_appliance_test.go @@ -1,7 +1,9 @@ package spectrocloud import ( + "context" "github.com/go-openapi/strfmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "testing" "github.com/spectrocloud/palette-sdk-go/api/models" @@ -116,3 +118,69 @@ func TestSetFields_WithoutNameTag(t *testing.T) { resultWithoutNameTag := setFields(d, mockTagsWithoutName) assert.Equal(t, expectedApplianceWithoutNameTag, resultWithoutNameTag) } + +func prepareApplianceBaseData() *schema.ResourceData { + d := resourceAppliance().TestResourceData() + _ = d.Set("uid", "test-edge-host-id") + _ = d.Set("wait", false) + d.SetId("test-idz") + return d +} + +func TestResourceApplianceCreateInvalid(t *testing.T) { + + d := prepareApplianceBaseData() + + diags := resourceApplianceCreate(context.Background(), d, unitTestMockAPINegativeClient) + + assert.NotEmpty(t, diags) + assertFirstDiagMessage(t, diags, "Operation not allowed") +} + +func TestResourceApplianceRead(t *testing.T) { + + d := prepareApplianceBaseData() + + diags := resourceApplianceRead(context.Background(), d, unitTestMockAPIClient) + + assert.Empty(t, diags) + +} + +func TestResourceApplianceUpdate(t *testing.T) { + + d := prepareApplianceBaseData() + + diags := resourceApplianceUpdate(context.Background(), d, unitTestMockAPIClient) + + assert.Empty(t, diags) + +} + +func TestResourceApplianceDelete(t *testing.T) { + + d := 
prepareApplianceBaseData() + + diags := resourceApplianceDelete(context.Background(), d, unitTestMockAPIClient) + + assert.Empty(t, diags) +} + +func TestResourceApplianceDeleteInvalid(t *testing.T) { + + d := prepareApplianceBaseData() + + diags := resourceApplianceDelete(context.Background(), d, unitTestMockAPINegativeClient) + + assert.NotEmpty(t, diags) + assertFirstDiagMessage(t, diags, "No edge host found") + +} + +func TestResourceApplianceGetState(t *testing.T) { + + diags := resourceApplianceStateRefreshFunc(getV1ClientWithResourceContext(unitTestMockAPIClient, "project"), "test") + + assert.NotEmpty(t, diags) + +} diff --git a/spectrocloud/resource_application_profile_test.go b/spectrocloud/resource_application_profile_test.go index 5ec82b6a..1f461bcd 100644 --- a/spectrocloud/resource_application_profile_test.go +++ b/spectrocloud/resource_application_profile_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "strings" "testing" @@ -209,20 +210,6 @@ func TestToApplicationProfilePackUpdate(t *testing.T) { } } -//func TestGetAppTiersContent(t *testing.T) { -// appUid := "test-app-tier-id" -// d := getBaseResourceData() -// d.SetId(appUid) -// m := &client.V1Client{} -// appTiers, _, _ := getAppTiersContent(m, d) -// assert.Equal(t, appUid, appTiers[0].Metadata.UID) -// assert.Equal(t, "mysql", appTiers[0].Metadata.Name) -// assert.Equal(t, "test-source-uid", appTiers[0].Spec.SourceAppTierUID) -// assert.Equal(t, "5.25", appTiers[0].Spec.Version) -// assert.Equal(t, "test-registry-id", appTiers[0].Spec.RegistryUID) -// assert.Equal(t, 10, int(appTiers[0].Spec.InstallOrder)) -//} - func TestGetValueInProperties(t *testing.T) { prop := map[string]interface{}{ "key1": "value1", @@ -234,70 +221,6 @@ func TestGetValueInProperties(t *testing.T) { assert.Equal(t, "", result) } -//func TestFlattenAppPacks(t *testing.T) { -// d := getBaseResourceData() -// ctx := context.Background() -// m := &client.V1Client{} -// -// var diagPack 
[]*models.V1PackManifestEntity -// diagPack = append(diagPack, &models.V1PackManifestEntity{ -// UID: "test-pack-uid", -// Name: types.Ptr("kafka"), -// RegistryUID: "test-pub-registry-uid", -// Type: "manifest", -// Values: "test values", -// }) -// -// var tiers []*models.V1AppTierRef -// tiers = append(tiers, &models.V1AppTierRef{ -// Type: "manifest", -// UID: "test-tier-uid", -// Name: "kafka", -// Version: "5.1", -// }) -// -// var tierDet []*models.V1AppTier -// var manifest []*models.V1ObjectReference -// manifest = append(manifest, &models.V1ObjectReference{ -// Name: "kafka-dep", -// UID: "test-manifest-uid", -// Kind: "Deployment", -// }) -// -// var props []*models.V1AppTierProperty -// props = append(props, &models.V1AppTierProperty{ -// Name: "prop_key", -// Value: "prop_value", -// Type: "string", -// Format: "", -// }) -// tierDet = append(tierDet, &models.V1AppTier{ -// Metadata: &models.V1ObjectMeta{ -// UID: "test-uid", -// Name: "kafka", -// }, -// Spec: &models.V1AppTierSpec{ -// Type: "manifest", -// SourceAppTierUID: "test-source-uid", -// Version: "5.25", -// RegistryUID: "test-registry-id", -// InstallOrder: 10, -// Manifests: manifest, -// Properties: props, -// }, -// }) -// -// re, _ := flattenAppPacks(m, diagPack, tiers, tierDet, d, ctx) -// assert.Equal(t, "test-uid", re[0].(map[string]interface{})["uid"]) -// assert.Equal(t, "test-registry-id", re[0].(map[string]interface{})["registry_uid"]) -// assert.Equal(t, "kafka", re[0].(map[string]interface{})["name"]) -// assert.Equal(t, "test-source-uid", re[0].(map[string]interface{})["source_app_tier"]) -// assert.Equal(t, "prop_value", re[0].(map[string]interface{})["properties"].(map[string]string)["prop_key"]) -// assert.Equal(t, "kafka-dep", re[0].(map[string]interface{})["manifest"].([]interface{})[0].(map[string]interface{})["name"]) -// assert.Equal(t, "test-manifest-uid", re[0].(map[string]interface{})["manifest"].([]interface{})[0].(map[string]interface{})["uid"]) -// 
assert.Equal(t, "test: \n content", re[0].(map[string]interface{})["manifest"].([]interface{})[0].(map[string]interface{})["content"]) -//} - func TestToPropertiesTier(t *testing.T) { props := map[string]interface{}{ "properties": map[string]interface{}{ @@ -335,7 +258,7 @@ func TestToApplicationProfileCreate(t *testing.T) { "dbname": "testDB", }, }) - d.Set("pack", p) + _ = d.Set("pack", p) cp, _ := toApplicationProfileCreate(d) assert.Equal(t, p[0]["type"], string(cp.Spec.Template.AppTiers[0].Type)) assert.Equal(t, p[0]["source_app_tier"], cp.Spec.Template.AppTiers[0].SourceAppTierUID) @@ -344,40 +267,48 @@ func TestToApplicationProfileCreate(t *testing.T) { assert.Equal(t, "testDB", string(cp.Spec.Template.AppTiers[0].Properties[0].Value)) } -//func TestToApplicationTiersUpdate(t *testing.T) { -// d := getBaseResourceData() -// var p []map[string]interface{} -// p = append(p, map[string]interface{}{ -// "type": "operator-instance", -// "source_app_tier": "testSUID", -// "registry_uid": "test_reg_uid", -// "uid": "test_pack_uid", -// "name": "mysql", -// "properties": map[string]interface{}{ -// "dbname": "testDB", -// }, -// }) -// d.Set("pack", p) -// m := &client.V1Client{} -// _, ut, _, _ := toApplicationTiersUpdate(d, m) -// assert.Equal(t, "mysql", ut["test-uid"].Name) -// assert.Equal(t, "dbname", string(ut["test-uid"].Properties[0].Name)) -// assert.Equal(t, "testDB", string(ut["test-uid"].Properties[0].Value)) -//} - -//func TestResourceApplicationProfileCreate(t *testing.T) { -// d := getBaseResourceData() -// ctx := context.Background() -// m := &client.V1Client{} -// s := resourceApplicationProfileCreate(ctx, d, m) -// assert.Equal(t, false, s.HasError()) -// -//} - -//func TestResourceApplicationProfileDelete(t *testing.T) { -// d := getBaseResourceData() -// ctx := context.Background() -// m := &client.V1Client{} -// r := resourceApplicationProfileDelete(ctx, d, m) -// assert.Equal(t, false, r.HasError()) -//} +func TestToApplicationTiersUpdate(t 
*testing.T) { + d := getBaseResourceData() + d.SetId("test-app-profile-id") + var p []map[string]interface{} + p = append(p, map[string]interface{}{ + "type": "operator-instance", + "source_app_tier": "testSUID", + "registry_uid": "test_reg_uid", + "uid": "test_pack_uid", + "name": "mysql", + "properties": map[string]interface{}{ + "dbname": "testDB", + }, + }) + _ = d.Set("pack", p) + + _, _, _, err := toApplicationTiersUpdate(d, getV1ClientWithResourceContext(unitTestMockAPIClient, "")) + assert.Empty(t, err) +} + +func TestResourceApplicationProfileCreate(t *testing.T) { + d := getBaseResourceData() + var ctx context.Context + _ = d.Set("context", "project") + s := resourceApplicationProfileCreate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, false, s.HasError()) + +} + +func TestResourceApplicationProfileUpdate(t *testing.T) { + d := getBaseResourceData() + var ctx context.Context + _ = d.Set("context", "project") + s := resourceApplicationProfileUpdate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, false, s.HasError()) + +} + +func TestResourceApplicationProfileDelete(t *testing.T) { + d := getBaseResourceData() + var ctx context.Context + d.SetId("test-app-profile-id") + r := resourceApplicationProfileDelete(ctx, d, unitTestMockAPIClient) + assert.Equal(t, false, r.HasError()) +} diff --git a/spectrocloud/resource_application_test.go b/spectrocloud/resource_application_test.go new file mode 100644 index 00000000..7c699d08 --- /dev/null +++ b/spectrocloud/resource_application_test.go @@ -0,0 +1,61 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseResourceApplicationData() *schema.ResourceData { + d := resourceApplication().TestResourceData() + d.SetId("test-application-id") + _ = d.Set("name", "test-application") + _ = d.Set("tags", []string{"test:dev"}) + _ = d.Set("application_profile_uid", "test-application-profile-id") 
+ var con []interface{} + con = append(con, map[string]interface{}{ + "cluster_uid": "test-cluster-id", + "cluster_group_uid": "test-cluster-group-id", + "cluster_context": "project", + "cluster_name": "test-cluster", + "limits": []interface{}{ + map[string]interface{}{ + "cpu": 2, + "memory": 1000, + "storage": 100, + }, + }, + }) + + _ = d.Set("config", con) + return d +} + +func TestResourceApplicationCreate(t *testing.T) { + d := prepareBaseResourceApplicationData() + + diags := resourceApplicationCreate(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestResourceApplicationRead(t *testing.T) { + d := prepareBaseResourceApplicationData() + + diags := resourceApplicationRead(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestResourceApplicationUpdate(t *testing.T) { + d := prepareBaseResourceApplicationData() + + diags := resourceApplicationUpdate(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestResourceApplicationDelete(t *testing.T) { + d := prepareBaseResourceApplicationData() + + diags := resourceApplicationDelete(context.Background(), d, unitTestMockAPIClient) + assert.Empty(t, diags) +} diff --git a/spectrocloud/resource_backup_storage_location_test.go b/spectrocloud/resource_backup_storage_location_test.go new file mode 100644 index 00000000..2023bd49 --- /dev/null +++ b/spectrocloud/resource_backup_storage_location_test.go @@ -0,0 +1,83 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareResourceBackupStorageLocation() *schema.ResourceData { + d := resourceBackupStorageLocation().TestResourceData() + d.SetId("test-backup-location-id") + _ = d.Set("name", "test-backup-location") + _ = d.Set("is_default", false) + _ = d.Set("region", "test-east") + _ = d.Set("bucket_name", "test-bucket") + _ = d.Set("ca_cert", 
"test-cert") + s3 := make([]interface{}, 0) + s3 = append(s3, map[string]interface{}{ + "s3_url": "s3://test/test", + "s3_force_path_style": false, + "credential_type": "secret", + "access_key": "test-access-key", + "secret_key": "test-secret-key", + "arn": "test-arn", + "external_id": "test-external-id", + }) + _ = d.Set("s3", s3) + + return d +} + +func TestResourceBackupStorageLocationCreate(t *testing.T) { + ctx := context.Background() + d := prepareResourceBackupStorageLocation() + diags := resourceBackupStorageLocationCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-backup-location-id", d.Id()) +} + +func TestResourceBackupStorageLocationCreateSTS(t *testing.T) { + ctx := context.Background() + d := prepareResourceBackupStorageLocation() + s3 := make([]interface{}, 0) + s3 = append(s3, map[string]interface{}{ + "s3_url": "s3://test/test", + "s3_force_path_style": false, + "credential_type": "sts", + "access_key": "test-access-key", + "secret_key": "test-secret-key", + "arn": "test-arn", + "external_id": "test-external-id", + }) + _ = d.Set("s3", s3) + diags := resourceBackupStorageLocationCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-backup-location-id", d.Id()) +} + +func TestResourceBackupStorageLocationRead(t *testing.T) { + ctx := context.Background() + d := prepareResourceBackupStorageLocation() + d.SetId("test-bsl-location-id") + diags := resourceBackupStorageLocationRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-bsl-location-id", d.Id()) +} + +func TestResourceBackupStorageLocationUpdate(t *testing.T) { + ctx := context.Background() + d := prepareResourceBackupStorageLocation() + diags := resourceBackupStorageLocationUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-backup-location-id", d.Id()) +} + +func TestResourceBackupStorageLocationDelete(t *testing.T) { + ctx := context.Background() + d := 
prepareResourceBackupStorageLocation() + diags := resourceBackupStorageLocationDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-backup-location-id", d.Id()) +} diff --git a/spectrocloud/resource_cloud_account_aws_test.go b/spectrocloud/resource_cloud_account_aws_test.go index 857cf9ca..ce0006b8 100644 --- a/spectrocloud/resource_cloud_account_aws_test.go +++ b/spectrocloud/resource_cloud_account_aws_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -171,3 +172,56 @@ func TestFlattenCloudAccountAws_NonStsType(t *testing.T) { expectedARNs := []string{"arn:aws:test_policy_secret1", "arn:aws:test_policy_secret2"} assert.ElementsMatch(t, expectedARNs, actualARNs) } + +func prepareBaseAwsAccountTestData() *schema.ResourceData { + d := resourceCloudAccountAws().TestResourceData() + d.SetId("test-aws-account-1") + _ = d.Set("name", "test-aws-account") + _ = d.Set("context", "project") + _ = d.Set("aws_access_key", "test-access-key") + _ = d.Set("aws_secret_key", "test-secret-key") + _ = d.Set("type", "secret") + _ = d.Set("arn", "test-arn") + _ = d.Set("external_id", "test-external-id") + _ = d.Set("partition", "aws") + _ = d.Set("policy_arns", []string{"test-policy-arn"}) + return d +} + +func TestResourceCloudAccountAwsCreate(t *testing.T) { + ctx := context.Background() + d := prepareBaseAwsAccountTestData() + diags := resourceCloudAccountAwsCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-aws-account-1", d.Id()) +} + +func TestResourceCloudAccountAwsRead(t *testing.T) { + ctx := context.Background() + d := prepareBaseAwsAccountTestData() + diags := resourceCloudAccountAwsRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-aws-account-1", d.Id()) +} +func TestResourceCloudAccountAwsUpdate(t *testing.T) { + ctx := context.Background() + d := prepareBaseAwsAccountTestData() 
+ diags := resourceCloudAccountAwsUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-aws-account-1", d.Id()) +} +func TestResourceCloudAccountAwsDelete(t *testing.T) { + ctx := context.Background() + d := prepareBaseAwsAccountTestData() + diags := resourceCloudAccountAwsDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} + +func TestResourceCloudAccountAwsImport(t *testing.T) { + ctx := context.Background() + d := prepareBaseAwsAccountTestData() + d.SetId("test-import-acc-id:project") + _, err := resourceAccountAwsImport(ctx, d, unitTestMockAPIClient) + assert.Empty(t, err) + assert.Equal(t, "test-import-acc-id", d.Id()) +} diff --git a/spectrocloud/resource_cloud_account_azure_test.go b/spectrocloud/resource_cloud_account_azure_test.go index d5a1959c..c380dd05 100644 --- a/spectrocloud/resource_cloud_account_azure_test.go +++ b/spectrocloud/resource_cloud_account_azure_test.go @@ -1,6 +1,8 @@ package spectrocloud import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "testing" "github.com/spectrocloud/palette-sdk-go/api/models" @@ -68,3 +70,52 @@ func TestFlattenCloudAccountAzure(t *testing.T) { assert.Equal(t, true, rd.Get("disable_properties_request")) assert.Equal(t, "AzureUSGovernmentCloud", rd.Get("cloud")) } + +func prepareResourceCloudAccountAzureTestData() *schema.ResourceData { + d := resourceCloudAccountAzure().TestResourceData() + d.SetId("test-azure-account-id-1") + _ = d.Set("name", "test-azure-account-1") + _ = d.Set("context", "project") + _ = d.Set("azure_tenant_id", "tenant-azure-id") + _ = d.Set("azure_client_id", "azure-client-id") + _ = d.Set("azure_client_secret", "test-client-secret") + _ = d.Set("tenant_name", "azure-tenant") + _ = d.Set("disable_properties_request", false) + _ = d.Set("cloud", "AzurePublicCloud") + return d +} + +func TestResourceCloudAccountAzureCreate(t *testing.T) { + // Mock context and resource data + d := 
prepareResourceCloudAccountAzureTestData() + ctx := context.Background() + diags := resourceCloudAccountAzureCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-azure-account-id-1", d.Id()) +} + +func TestResourceCloudAccountAzureRead(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountAzureTestData() + ctx := context.Background() + diags := resourceCloudAccountAzureRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-azure-account-id-1", d.Id()) +} + +func TestResourceCloudAccountAzureUpdate(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountAzureTestData() + ctx := context.Background() + diags := resourceCloudAccountAzureUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-azure-account-id-1", d.Id()) +} + +func TestResourceCloudAccountAzureDelete(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountAzureTestData() + ctx := context.Background() + diags := resourceCloudAccountAzureDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} diff --git a/spectrocloud/resource_cloud_account_custom_test.go b/spectrocloud/resource_cloud_account_custom_test.go index 241da5a9..20fbf400 100644 --- a/spectrocloud/resource_cloud_account_custom_test.go +++ b/spectrocloud/resource_cloud_account_custom_test.go @@ -1,6 +1,8 @@ package spectrocloud import ( + "context" + "errors" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/stretchr/testify/assert" "testing" @@ -81,95 +83,82 @@ func TestFlattenCustomCloudAccount(t *testing.T) { assert.Equal(t, "test-cloud", d.Get("cloud")) } -// Need mock -//func TestResourceCustomCloudAccountCreate(t *testing.T) { -// // Mock context and resource data -// ctx := context.Background() -// d := resourceCloudAccountCustom().TestResourceData() -// d.Set("name", "test-name") -// d.Set("cloud", "test-cloud") -// 
d.Set("private_cloud_gateway_id", "test-private-cloud-gateway-id") -// cred := map[string]interface{}{ -// "username": "test-username", -// "password": "test-password", -// } -// d.Set("credentials", cred) -// -// mockClient := &client.V1Client{} -// d.Set("context", "test-context") -// d.Set("cloud", "test-cloud") -// diags := resourceCloudAccountCustomCreate(ctx, d, mockClient) -// assert.Len(t, diags, 0) -// assert.Equal(t, "mock-uid", d.Id()) -//} - -//func TestResourceCustomCloudAccountCreateError(t *testing.T) { -// // Mock context and resource data -// ctx := context.Background() -// d := resourceCloudAccountCustom().TestResourceData() -// d.Set("name", "test-name") -// d.Set("cloud", "test-cloud") -// d.Set("private_cloud_gateway_id", "test-private-cloud-gateway-id") -// cred := map[string]interface{}{ -// "username": "test-username", -// "password": "test-password", -// } -// d.Set("credentials", cred) -// -// // Set up mock client -// mockClient := &client.V1Client{} -// d.Set("context", "test-context") -// d.Set("cloud", "test-cloud") -// diags := resourceCloudAccountCustomCreate(ctx, d, mockClient) -// assert.Error(t, errors.New("unable to find account")) -// assert.Len(t, diags, 1) -// assert.Equal(t, "", d.Id()) -//} -// -//func TestResourceCustomCloudAccountRead(t *testing.T) { -// ctx := context.Background() -// d := resourceCloudAccountCustom().TestResourceData() -// -// mockClient := &client.V1Client{} -// -// d.SetId("existing-id") -// d.Set("context", "test-context") -// d.Set("cloud", "test-cloud") -// diags := resourceCloudAccountCustomRead(ctx, d, mockClient) -// -// assert.Len(t, diags, 0) -// assert.Equal(t, "existing-id", d.Id()) -// assert.Equal(t, "test-name", d.Get("name")) -// assert.Equal(t, "test-scope", d.Get("context")) -// assert.Equal(t, "test-overlord-uid", d.Get("private_cloud_gateway_id")) -// assert.Equal(t, "test-cloud", d.Get("cloud")) -//} -// -//func TestResourceCustomCloudAccountUpdate(t *testing.T) { -// ctx := 
context.Background() -// d := resourceCloudAccountCustom().TestResourceData() -// mockClient := &client.V1Client{} -// -// d.SetId("existing-id") -// d.Set("context", "updated-context") -// d.Set("cloud", "updated-cloud") -// diags := resourceCloudAccountCustomUpdate(ctx, d, mockClient) -// -// assert.Len(t, diags, 0) -// assert.Equal(t, "existing-id", d.Id()) -// assert.Equal(t, "updated-name", d.Get("name")) -// assert.Equal(t, "updated-scope", d.Get("context")) -// assert.Equal(t, "updated-overlord-uid", d.Get("private_cloud_gateway_id")) -// assert.Equal(t, "updated-cloud", d.Get("cloud")) -//} -// -//func TestResourceCustomCloudAccountDelete(t *testing.T) { -// ctx := context.Background() -// d := resourceCloudAccountCustom().TestResourceData() -// mockClient := &client.V1Client{} -// d.SetId("existing-id") -// d.Set("context", "test-context") -// d.Set("cloud", "test-cloud") -// diags := resourceCloudAccountCustomDelete(ctx, d, mockClient) -// assert.Len(t, diags, 0) -//} +// mock +func TestResourceCustomCloudAccountCreate(t *testing.T) { + // Mock context and resource data + ctx := context.Background() + d := resourceCloudAccountCustom().TestResourceData() + _ = d.Set("name", "test-name") + _ = d.Set("cloud", "test-cloud") + _ = d.Set("private_cloud_gateway_id", "test-private-cloud-gateway-id") + cred := map[string]interface{}{ + "username": "test-username", + "password": "test-password", + } + _ = d.Set("credentials", cred) + + _ = d.Set("context", "test-context") + _ = d.Set("cloud", "test-cloud") + diags := resourceCloudAccountCustomCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "mock-uid", d.Id()) +} + +func TestResourceCustomCloudAccountCreateError(t *testing.T) { + // Mock context and resource data + ctx := context.Background() + d := resourceCloudAccountCustom().TestResourceData() + _ = d.Set("name", "test-name") + _ = d.Set("cloud", "test-cloud") + _ = d.Set("private_cloud_gateway_id", 
"test-private-cloud-gateway-id") + cred := map[string]interface{}{ + "username": "test-username", + "password": "test-password", + } + _ = d.Set("credentials", cred) + + // Set up mock client + _ = d.Set("context", "test-context") + _ = d.Set("cloud", "test-cloud") + diags := resourceCloudAccountCustomCreate(ctx, d, unitTestMockAPINegativeClient) + assert.Error(t, errors.New("unable to find account")) + assert.Len(t, diags, 1) + assert.Equal(t, "", d.Id()) +} + +func TestResourceCustomCloudAccountRead(t *testing.T) { + ctx := context.Background() + d := resourceCloudAccountCustom().TestResourceData() + + d.SetId("mock-uid") + _ = d.Set("context", "test-context") + _ = d.Set("cloud", "test-cloud") + diags := resourceCloudAccountCustomRead(ctx, d, unitTestMockAPIClient) + + assert.Len(t, diags, 0) + assert.Equal(t, "mock-uid", d.Id()) + +} + +func TestResourceCustomCloudAccountUpdate(t *testing.T) { + ctx := context.Background() + d := resourceCloudAccountCustom().TestResourceData() + + d.SetId("existing-id") + _ = d.Set("context", "updated-context") + _ = d.Set("cloud", "updated-cloud") + diags := resourceCloudAccountCustomUpdate(ctx, d, unitTestMockAPIClient) + + assert.Len(t, diags, 0) +} + +func TestResourceCustomCloudAccountDelete(t *testing.T) { + ctx := context.Background() + d := resourceCloudAccountCustom().TestResourceData() + + d.SetId("existing-id") + _ = d.Set("context", "test-context") + _ = d.Set("cloud", "test-cloud") + diags := resourceCloudAccountCustomDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} diff --git a/spectrocloud/resource_cloud_account_gcp_test.go b/spectrocloud/resource_cloud_account_gcp_test.go index 522e8de2..915a6eb0 100644 --- a/spectrocloud/resource_cloud_account_gcp_test.go +++ b/spectrocloud/resource_cloud_account_gcp_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" 
"github.com/stretchr/testify/assert" @@ -48,3 +49,53 @@ func TestToGcpAccount(t *testing.T) { }) } } + +func prepareResourceCloudAccountGcp() *schema.ResourceData { + d := resourceCloudAccountGcp().TestResourceData() + d.SetId("test-gcp-account-id-1") + _ = d.Set("name", "test-gcp-account-1") + _ = d.Set("context", "project") + _ = d.Set("gcp_json_credentials", "test-cred-json") + + return d +} + +func TestResourceCloudAccountGcpCreate(t *testing.T) { + d := prepareResourceCloudAccountGcp() + ctx := context.Background() + diags := resourceCloudAccountGcpCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-gcp-account-id-1", d.Id()) +} + +func TestResourceCloudAccountGcpRead(t *testing.T) { + d := prepareResourceCloudAccountGcp() + ctx := context.Background() + diags := resourceCloudAccountGcpRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-gcp-account-id-1", d.Id()) +} + +func TestResourceCloudAccountGcpUpdate(t *testing.T) { + d := prepareResourceCloudAccountGcp() + ctx := context.Background() + diags := resourceCloudAccountGcpUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-gcp-account-id-1", d.Id()) +} + +func TestResourceCloudAccountGcpDelete(t *testing.T) { + d := prepareResourceCloudAccountGcp() + ctx := context.Background() + diags := resourceCloudAccountGcpDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} + +func TestResourceCloudAccountGcpImport(t *testing.T) { + ctx := context.Background() + d := prepareResourceCloudAccountGcp() + d.SetId("test-import-acc-id:project") + _, err := resourceAccountGcpImport(ctx, d, unitTestMockAPIClient) + assert.Empty(t, err) + assert.Equal(t, "test-import-acc-id", d.Id()) +} diff --git a/spectrocloud/resource_cloud_account_maas_test.go b/spectrocloud/resource_cloud_account_maas_test.go index 6a6ef9ff..5d437000 100644 --- a/spectrocloud/resource_cloud_account_maas_test.go +++ 
b/spectrocloud/resource_cloud_account_maas_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/spectrocloud/terraform-provider-spectrocloud/types" @@ -62,3 +63,47 @@ func TestToMaasAccount(t *testing.T) { }) } } + +func prepareResourceCloudAccountMaas() *schema.ResourceData { + d := resourceCloudAccountMaas().TestResourceData() + d.SetId("test-maas-account-id-1") + _ = d.Set("name", "test-maas-account-1") + _ = d.Set("context", "project") + _ = d.Set("private_cloud_gateway_id", "test-pcg-id") + _ = d.Set("maas_api_endpoint", "test-maas-api-endpoint") + _ = d.Set("maas_api_key", "test-maas-api-key") + return d +} +func TestResourceCloudAccountMaasCreate(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountMaas() + ctx := context.Background() + diags := resourceCloudAccountMaasCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-maas-account-1", d.Id()) +} + +func TestResourceCloudAccountMaasRead(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountMaas() + ctx := context.Background() + diags := resourceCloudAccountMaasRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-maas-account-id-1", d.Id()) +} +func TestResourceCloudAccountMaasUpdate(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountMaas() + ctx := context.Background() + diags := resourceCloudAccountMaasUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-maas-account-id-1", d.Id()) +} +func TestResourceCloudAccountMaasDelete(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountMaas() + ctx := context.Background() + diags := resourceCloudAccountMaasDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + +} diff --git 
a/spectrocloud/resource_cloud_account_openstack_test.go b/spectrocloud/resource_cloud_account_openstack_test.go index 60d56661..cb24f858 100644 --- a/spectrocloud/resource_cloud_account_openstack_test.go +++ b/spectrocloud/resource_cloud_account_openstack_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/spectrocloud/terraform-provider-spectrocloud/types" @@ -100,3 +101,53 @@ func TestToOpenStackAccount(t *testing.T) { }) } } + +func prepareResourceCloudAccountOpenstack() *schema.ResourceData { + d := resourceCloudAccountOpenstack().TestResourceData() + d.SetId("test-openstack-account-id-1") + _ = d.Set("name", "test-openstack-account-1") + _ = d.Set("context", "project") + _ = d.Set("private_cloud_gateway_id", "pcg-id") + _ = d.Set("openstack_username", "test-uname") + _ = d.Set("openstack_password", "test-pwd") + _ = d.Set("identity_endpoint", "test-ep") + _ = d.Set("openstack_allow_insecure", false) + _ = d.Set("ca_certificate", "test-cert") + _ = d.Set("parent_region", "test-region1") + _ = d.Set("default_domain", "test.com") + _ = d.Set("default_project", "default") + + return d +} + +func TestResourceCloudAccountOpenstackCreate(t *testing.T) { + d := prepareResourceCloudAccountOpenstack() + ctx := context.Background() + diags := resourceCloudAccountOpenStackCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-openstack-account-id-1", d.Id()) +} + +func TestResourceCloudAccountOpenstackRead(t *testing.T) { + d := prepareResourceCloudAccountOpenstack() + ctx := context.Background() + diags := resourceCloudAccountOpenStackRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-openstack-account-id-1", d.Id()) +} + +func TestResourceCloudAccountOpenstackUpdate(t *testing.T) { + d := prepareResourceCloudAccountOpenstack() + ctx := context.Background() + diags := 
resourceCloudAccountOpenStackUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-openstack-account-id-1", d.Id()) +} + +func TestResourceCloudAccountOpenstackDelete(t *testing.T) { + d := prepareResourceCloudAccountOpenstack() + ctx := context.Background() + diags := resourceCloudAccountOpenStackDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-openstack-account-id-1", d.Id()) +} diff --git a/spectrocloud/resource_cloud_account_tke_test.go b/spectrocloud/resource_cloud_account_tke_test.go index faab864c..f8fcc0db 100644 --- a/spectrocloud/resource_cloud_account_tke_test.go +++ b/spectrocloud/resource_cloud_account_tke_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/spectrocloud/terraform-provider-spectrocloud/types" @@ -70,3 +71,49 @@ func TestToTencentAccount(t *testing.T) { }) } } + +func prepareResourceCloudAccountTencent() *schema.ResourceData { + d := resourceCloudAccountTencent().TestResourceData() + d.SetId("test-tke-account-id-1") + _ = d.Set("name", "test-tke-account-1") + _ = d.Set("context", "project") + _ = d.Set("tencent_secret_id", "test-secret-id") + _ = d.Set("tencent_secret_key", "test-secret-key") + + return d +} + +func TestResourceCloudAccountTencentCreate(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountTencent() + ctx := context.Background() + diags := resourceCloudAccountTencentCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-tke-account-id-1", d.Id()) +} + +func TestResourceCloudAccountTencentRead(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountTencent() + ctx := context.Background() + diags := resourceCloudAccountTencentRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, 
"test-tke-account-id-1", d.Id()) +} + +func TestResourceCloudAccountTencentUpdate(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountTencent() + ctx := context.Background() + diags := resourceCloudAccountTencentUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-tke-account-id-1", d.Id()) +} + +func TestResourceCloudAccountTencentDelete(t *testing.T) { + // Mock context and resource data + d := prepareResourceCloudAccountTencent() + ctx := context.Background() + diags := resourceCloudAccountTencentDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} diff --git a/spectrocloud/resource_cloud_account_vsphere_negative_test.go b/spectrocloud/resource_cloud_account_vsphere_negative_test.go index 74511aaa..ba7fa35c 100644 --- a/spectrocloud/resource_cloud_account_vsphere_negative_test.go +++ b/spectrocloud/resource_cloud_account_vsphere_negative_test.go @@ -49,12 +49,12 @@ func TestFlattenVsphereCloudAccountAttributes(t *testing.T) { AttrName string ExpectedErr bool }{ - {"name", true}, - {"context", true}, - {"private_cloud_gateway_id", true}, - {"vsphere_vcenter", true}, - {"vsphere_username", true}, - {"vsphere_ignore_insecure_error", true}, + {"name", false}, + {"context", false}, + {"private_cloud_gateway_id", false}, + {"vsphere_vcenter", false}, + {"vsphere_username", false}, + {"vsphere_ignore_insecure_error", false}, } // Get a copy of the original schema @@ -72,18 +72,7 @@ func TestFlattenVsphereCloudAccountAttributes(t *testing.T) { continue } - // Create a new schema skipping the current attribute - newSchema := skipSchemaAttributes(originalSchema, []string{attrName}) - - resourceCloudAccountVsphereWithSkippedAttrs := &schema.Resource{ - CreateContext: resourceCloudAccountVsphereCreate, - ReadContext: resourceCloudAccountVsphereRead, - UpdateContext: resourceCloudAccountVsphereUpdate, - DeleteContext: resourceCloudAccountVsphereDelete, - Schema: newSchema, - } - - d := 
resourceCloudAccountVsphereWithSkippedAttrs.TestResourceData() + d := resourceCloudAccountVsphere().TestResourceData() // Test case where d.Set returns an error diags, _ := flattenVsphereCloudAccount(d, account) diff --git a/spectrocloud/resource_cloud_account_vsphere_test.go b/spectrocloud/resource_cloud_account_vsphere_test.go index 1a3e2377..ffff84c2 100644 --- a/spectrocloud/resource_cloud_account_vsphere_test.go +++ b/spectrocloud/resource_cloud_account_vsphere_test.go @@ -1,6 +1,8 @@ package spectrocloud import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "testing" "github.com/spectrocloud/palette-sdk-go/api/models" @@ -74,3 +76,47 @@ func TestFlattenVsphereCloudAccount(t *testing.T) { assert.Equal(t, "testuser", rd.Get("vsphere_username")) assert.Equal(t, true, rd.Get("vsphere_ignore_insecure_error")) } + +func prepareResourceCloudAccountVsphere() *schema.ResourceData { + d := resourceCloudAccountVsphere().TestResourceData() + d.SetId("test-vsphere-account-id-1") + _ = d.Set("name", "test-vsphere-account-1") + _ = d.Set("context", "project") + _ = d.Set("private_cloud_gateway_id", "pcg-id") + _ = d.Set("vsphere_vcenter", "test-vcenter") + _ = d.Set("vsphere_username", "test-uname") + _ = d.Set("vsphere_password", "test-pwd") + _ = d.Set("vsphere_ignore_insecure_error", false) + return d +} + +func TestResourceCloudAccountVsphereCreate(t *testing.T) { + d := prepareResourceCloudAccountVsphere() + ctx := context.Background() + diags := resourceCloudAccountVsphereCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-vsphere-account-id-1", d.Id()) +} + +func TestResourceCloudAccountVsphereRead(t *testing.T) { + d := prepareResourceCloudAccountVsphere() + ctx := context.Background() + diags := resourceCloudAccountVsphereRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-vsphere-account-id-1", d.Id()) +} + +func TestResourceCloudAccountVsphereUpdate(t *testing.T) { 
+ d := prepareResourceCloudAccountVsphere() + ctx := context.Background() + diags := resourceCloudAccountVsphereUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-vsphere-account-id-1", d.Id()) +} + +func TestResourceCloudAccountVsphereDelete(t *testing.T) { + d := prepareResourceCloudAccountVsphere() + ctx := context.Background() + diags := resourceCloudAccountVsphereDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} diff --git a/spectrocloud/resource_cluster_azure_test.go b/spectrocloud/resource_cluster_azure_test.go index 58c59625..eb3b0627 100644 --- a/spectrocloud/resource_cluster_azure_test.go +++ b/spectrocloud/resource_cluster_azure_test.go @@ -217,19 +217,6 @@ func TestToMachinePoolAzure(t *testing.T) { } -//func TestToAzureCluster(t *testing.T) { -// // Mock data for schema.ResourceData -// d := prepareAzureTestData() -// -// m := &client.V1Client{} -// result, err := toAzureCluster(m, d) -// -// // Assertions -// assert.NoError(t, err, "Expected no error") -// assert.NotNil(t, result, "Expected non-nil result") -// -//} - func TestFlattenMachinePoolConfigsAzure(t *testing.T) { // Sample V1AzureMachinePoolConfig data azsList := []string{"us-east-1a", "us-east-1b"} diff --git a/spectrocloud/resource_cluster_custom_cloud_test.go b/spectrocloud/resource_cluster_custom_cloud_test.go index 04050168..b66fe060 100644 --- a/spectrocloud/resource_cluster_custom_cloud_test.go +++ b/spectrocloud/resource_cluster_custom_cloud_test.go @@ -34,40 +34,6 @@ func TestFlattenCloudConfigsValuesCustomCloud(t *testing.T) { assert.Equal(t, "test-values", result[0].(map[string]interface{})["values"], "Values should match") } -//func TestFlattenCloudConfigCustom(t *testing.T) { -// // Mock resource data -// mockResourceData := resourceClusterCustomCloud().TestResourceData() -// mockResourceData.Set("context", "project") -// mockResourceData.Set("cloud", "aws") -// mockResourceData.Set("cloud_config_id", "config123") -// -// var 
mps []*models.V1CustomMachinePoolConfig -// mps = append(mps, &models.V1CustomMachinePoolConfig{ -// AdditionalLabels: nil, -// IsControlPlane: ptr.BoolPtr(true), -// Name: "cp-pool", -// Size: 1, -// Taints: nil, -// UseControlPlaneAsWorker: true, -// Values: "-- test yaml string", -// }) -// -// // Mock client -// mockClient := &client.V1Client{} -// -// // Call the function with mocked dependencies -// diags, _ := flattenCloudConfigCustom("config123", mockResourceData, mockClient) -// -// var emptyErr diag.Diagnostics -// // Assertions -// assert.Equal(t, emptyErr, diags) -// -// // Assert resource data values -// assert.Equal(t, "config123", mockResourceData.Get("cloud_config_id")) -// assert.Equal(t, "account123", mockResourceData.Get("cloud_account_id")) -// -//} - func TestToMachinePoolCustomCloud(t *testing.T) { // Test case 1: Valid machine pool configuration machinePool := map[string]interface{}{ @@ -116,14 +82,16 @@ func TestToCustomClusterConfig(t *testing.T) { "node_pool_config": "test-config-yaml", }, }) - mockResourceData.Set("location_config", map[string]interface{}{ + var location []interface{} + location = append(location, map[string]interface{}{ "country_code": "ind", "country_name": "india", "region_code": "MZ", "region_name": "mumbai", - "latitude": "N12312", - "longitude": "S12312", + "latitude": 0.0, + "longitude": 0.0, }) + mockResourceData.Set("location_config", location) expected := &models.V1CustomClusterConfigEntity{ Location: toClusterLocationConfigs(mockResourceData), @@ -191,79 +159,6 @@ func TestToCustomCloudCluster(t *testing.T) { assert.NotNil(t, cluster.Spec.Profiles) // Verify Profiles } -//func TestResourceClusterCustomCloudUpdate(t *testing.T) { -// // Mock schema.ResourceData with necessary fields -// mockResourceData := resourceClusterCustomCloud().TestResourceData() -// mockResourceData.Set("cloud_config", []interface{}{ -// map[string]interface{}{ -// "values": "test-values", -// }, -// }) -// 
mockResourceData.Set("machine_pool", []interface{}{ -// map[string]interface{}{ -// "control_plane": true, -// "control_plane_as_worker": false, -// "node_pool_config": "test-node-pool-config", -// }, -// }) -// mockResourceData.Set("context", "project") -// mockResourceData.Set("cloud", "custom-cloud") -// mockResourceData.Set("cloud_account_id", "test-cloud-account-id") -// -// var mps []*models.V1CustomMachinePoolConfig -// mps = append(mps, &models.V1CustomMachinePoolConfig{ -// AdditionalLabels: nil, -// IsControlPlane: ptr.BoolPtr(true), -// Name: "cp-pool", -// Size: 1, -// Taints: nil, -// UseControlPlaneAsWorker: true, -// Values: "-- test yaml string", -// }) -// -// // Mock client.V1Client -// mockClient := &client.V1Client{} -// -// // Call the resourceClusterCustomCloudUpdate function with mock objects -// diags := resourceClusterCustomCloudUpdate(context.Background(), mockResourceData, mockClient) -// -// // Assertions -// var d diag.Diagnostics -// assert.Equal(t, d, diags) -// -//} - -//func TestResourceClusterCustomCloudCreate(t *testing.T) { -// // Mock schema.ResourceData with necessary fields -// mockResourceData := resourceClusterCustomCloud().TestResourceData() -// mockResourceData.Set("cloud_config", []interface{}{ -// map[string]interface{}{ -// "values": "test-values", -// }, -// }) -// mockResourceData.Set("machine_pool", []interface{}{ -// map[string]interface{}{ -// "control_plane": true, -// "control_plane_as_worker": false, -// "node_pool_config": "test-node-pool-config", -// }, -// }) -// mockResourceData.Set("context", "project") -// mockResourceData.Set("cloud", "custom-cloud") -// mockResourceData.Set("cloud_account_id", "test-cloud-account-id") -// mockResourceData.Set("skip_completion", true) -// -// // Mock client.V1Client -// mockClient := &client.V1Client{} -// -// // Call the resourceClusterCustomCloudCreate function with mock objects -// diags := resourceClusterCustomCloudCreate(context.Background(), mockResourceData, 
mockClient) -// -// // Assertions -// var d diag.Diagnostics -// assert.Equal(t, d, diags) -//} - func boolPtr(b bool) *bool { return &b } diff --git a/spectrocloud/resource_cluster_edge_vsphere_test.go b/spectrocloud/resource_cluster_edge_vsphere_test.go new file mode 100644 index 00000000..59cdb2ee --- /dev/null +++ b/spectrocloud/resource_cluster_edge_vsphere_test.go @@ -0,0 +1,233 @@ +package spectrocloud + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "testing" +) + +func TestFlattenMachinePoolConfigsEdgeVsphere(t *testing.T) { + tests := []struct { + name string + input []*models.V1VsphereMachinePoolConfig + expected []interface{} + }{ + { + name: "Nil machinePools input", + input: nil, + expected: make([]interface{}, 0), + }, + { + name: "Empty machinePools input", + input: []*models.V1VsphereMachinePoolConfig{}, + expected: make([]interface{}, 0), + }, + { + name: "Single machine pool with all fields populated", + input: []*models.V1VsphereMachinePoolConfig{ + { + AdditionalLabels: map[string]string{"env": "prod"}, + Taints: []*models.V1Taint{ + { + Effect: "NoSchedule", + Key: "key", + TimeAdded: models.V1Time{}, + Value: "np", + }, + }, + IsControlPlane: ptr.BoolPtr(true), + NodeRepaveInterval: 30, + UseControlPlaneAsWorker: true, + Name: "pool1", + Size: 3, + InstanceType: &models.V1VsphereInstanceType{ + DiskGiB: int32Ptr(100), + MemoryMiB: int64Ptr(8192), + NumCPUs: int32Ptr(4), + }, + Placements: []*models.V1VspherePlacementConfig{ + { + UID: "placement1", + Cluster: "cluster1", + ResourcePool: "resourcepool1", + Datastore: "datastore1", + Network: &models.V1VsphereNetworkConfig{ + NetworkName: ptr.StringPtr("network1"), + ParentPoolRef: &models.V1ObjectReference{UID: "pool1"}, + }, + }, + }, + }, + }, + expected: []interface{}{ + map[string]interface{}{ + "control_plane_as_worker": true, + "name": "pool1", + "count": 3, + 
"instance_type": []interface{}{ + map[string]interface{}{ + "disk_size_gb": 100, + "memory_mb": 8192, + "cpu": 4, + }, + }, + "placement": []interface{}{ + map[string]interface{}{ + "id": "placement1", + "cluster": "cluster1", + "resource_pool": "resourcepool1", + "datastore": "datastore1", + "network": "network1", + "static_ip_pool_id": "pool1", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _ = flattenMachinePoolConfigsEdgeVsphere(tt.input) + }) + } +} + +func TestToMachinePoolEdgeVsphere(t *testing.T) { + tests := []struct { + name string + input map[string]interface{} + expected *models.V1VsphereMachinePoolConfigEntity + expectError bool + }{ + { + name: "Valid input with worker nodes", + input: map[string]interface{}{ + "control_plane": false, + "control_plane_as_worker": false, + "name": "worker-pool", + "count": 3, + "instance_type": []interface{}{ + map[string]interface{}{ + "disk_size_gb": 100, + "memory_mb": 8192, + "cpu": 4, + }, + }, + "placement": []interface{}{ + map[string]interface{}{ + "id": "placement1", + "cluster": "cluster1", + "resource_pool": "resourcepool1", + "datastore": "datastore1", + "network": "network1", + "static_ip_pool_id": "pool1", + }, + }, + "node_repave_interval": 24, + }, + expected: &models.V1VsphereMachinePoolConfigEntity{ + CloudConfig: &models.V1VsphereMachinePoolCloudConfigEntity{ + Placements: []*models.V1VspherePlacementConfigEntity{ + { + UID: "placement1", + Cluster: "cluster1", + ResourcePool: "resourcepool1", + Datastore: "datastore1", + Network: &models.V1VsphereNetworkConfigEntity{ + NetworkName: types.Ptr("network1"), + ParentPoolUID: "pool1", + StaticIP: true, + }, + }, + }, + InstanceType: &models.V1VsphereInstanceType{ + DiskGiB: types.Ptr(int32(100)), + MemoryMiB: types.Ptr(int64(8192)), + NumCPUs: types.Ptr(int32(4)), + }, + }, + PoolConfig: &models.V1MachinePoolConfigEntity{ + IsControlPlane: false, + Labels: []string{"worker"}, + Name: 
types.Ptr("worker-pool"), + Size: types.Ptr(int32(3)), + NodeRepaveInterval: 24, + UpdateStrategy: &models.V1UpdateStrategy{ + Type: "", + }, + UseControlPlaneAsWorker: false, + }, + }, + expectError: false, + }, + { + name: "Valid input with control plane nodes", + input: map[string]interface{}{ + "control_plane": true, + "control_plane_as_worker": true, + "name": "control-plane-pool", + "count": 1, + "instance_type": []interface{}{ + map[string]interface{}{ + "disk_size_gb": 200, + "memory_mb": 16384, + "cpu": 8, + }, + }, + "placement": []interface{}{ + map[string]interface{}{ + "id": "placement2", + "cluster": "cluster2", + "resource_pool": "resourcepool2", + "datastore": "datastore2", + "network": "network2", + "static_ip_pool_id": "", + }, + }, + "node_repave_interval": 12, + }, + expected: &models.V1VsphereMachinePoolConfigEntity{ + CloudConfig: &models.V1VsphereMachinePoolCloudConfigEntity{ + Placements: []*models.V1VspherePlacementConfigEntity{ + { + UID: "placement2", + Cluster: "cluster2", + ResourcePool: "resourcepool2", + Datastore: "datastore2", + Network: &models.V1VsphereNetworkConfigEntity{ + NetworkName: types.Ptr("network2"), + ParentPoolUID: "", + StaticIP: false, + }, + }, + }, + InstanceType: &models.V1VsphereInstanceType{ + DiskGiB: types.Ptr(int32(200)), + MemoryMiB: types.Ptr(int64(16384)), + NumCPUs: types.Ptr(int32(8)), + }, + }, + PoolConfig: &models.V1MachinePoolConfigEntity{ + IsControlPlane: true, + Labels: []string{"control-plane"}, + Name: types.Ptr("control-plane-pool"), + Size: types.Ptr(int32(1)), + UpdateStrategy: &models.V1UpdateStrategy{ + Type: "", + }, + UseControlPlaneAsWorker: true, + }, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, _ = toMachinePoolEdgeVsphere(tt.input) + + }) + } +} diff --git a/spectrocloud/resource_cluster_gke_test.go b/spectrocloud/resource_cluster_gke_test.go index 4f3f96e9..f9a51738 100644 --- a/spectrocloud/resource_cluster_gke_test.go 
+++ b/spectrocloud/resource_cluster_gke_test.go @@ -111,35 +111,3 @@ func TestFlattenMachinePoolConfigsGke(t *testing.T) { assert.Equal(t, "n1-standard-4", pool2["instance_type"]) assert.Equal(t, 200, pool2["disk_size_gb"]) } - -//func TestFlattenClusterProfileForImport(t *testing.T) { -// m := &client.V1Client{} -// -// // Test case: Successfully retrieve cluster profiles -// clusterContext := "project" -// clusterID := "test-cluster-id" -// clusterProfiles := []interface{}{ -// map[string]interface{}{"id": "profile-1"}, -// map[string]interface{}{"id": "profile-2"}, -// } -// mockResourceData := resourceClusterGke().TestResourceData() -// err := mockResourceData.Set("cluster_profile", clusterProfiles) -// if err != nil { -// return -// } -// err = mockResourceData.Set("context", clusterContext) -// if err != nil { -// return -// } -// mockResourceData.SetId(clusterID) -// -// result, err := flattenClusterProfileForImport(m, mockResourceData) -// assert.NoError(t, err) -// assert.Equal(t, clusterProfiles, result) -// -// //Test case: Error retrieving cluster -// m = &client.V1Client{} -// result, err = flattenClusterProfileForImport(m, mockResourceData) -// assert.Error(t, err) -// assert.Empty(t, result) -//} diff --git a/spectrocloud/resource_cluster_group_test.go b/spectrocloud/resource_cluster_group_test.go index 662739fe..590b18dd 100644 --- a/spectrocloud/resource_cluster_group_test.go +++ b/spectrocloud/resource_cluster_group_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -10,8 +11,12 @@ import ( func prepareClusterGroupTestData() (*schema.ResourceData, error) { d := resourceClusterGroup().TestResourceData() - d.SetId("") - err := d.Set("name", "test-name") + d.SetId("test-cg-1") + err := d.Set("context", "project") + if err != nil { + return nil, err + } + err = d.Set("name", "test-name") if err != nil { return nil, err } @@ -52,34 +57,6 @@ func 
prepareClusterGroupTestData() (*schema.ResourceData, error) { return d, nil } -//func TestToClusterGroup(t *testing.T) { -// assert := assert.New(t) -// -// // Create a mock ResourceData object -// d, err := prepareClusterGroupTestData() -// if err != nil { -// t.Errorf(err.Error()) -// } -// m := &client.V1Client{} -// // Call the function with the mock resource data -// output := toClusterGroup(m, d) -// -// // Check the output against the expected values -// assert.Equal("test-name", output.Metadata.Name) -// assert.Equal("", output.Metadata.UID) -// assert.Equal(2, len(output.Metadata.Labels)) -// assert.Equal("hostCluster", output.Spec.Type) -// assert.Equal(1, len(output.Spec.ClusterRefs)) -// assert.Equal("test-cluster-uid", output.Spec.ClusterRefs[0].ClusterUID) -// assert.Equal(int32(4000), output.Spec.ClustersConfig.LimitConfig.CPUMilliCore) -// assert.Equal(int32(4096), output.Spec.ClustersConfig.LimitConfig.MemoryMiB) -// assert.Equal(int32(100), output.Spec.ClustersConfig.LimitConfig.StorageGiB) -// assert.Equal(int32(200), output.Spec.ClustersConfig.LimitConfig.OverSubscription) -// assert.Equal("namespace: test-namespace", output.Spec.ClustersConfig.Values) -// assert.Equal("LoadBalancer", output.Spec.ClustersConfig.EndpointType) -// assert.Equal("test-cluster-uid", output.Spec.Profiles[0].UID) -//} - func TestDefaultValuesSet(t *testing.T) { clusterGroupLimitConfig := &models.V1ClusterGroupLimitConfig{} hostClusterConfig := []*models.V1ClusterGroupHostClusterConfig{{}} @@ -121,54 +98,6 @@ func TestToClusterGroupLimitConfig(t *testing.T) { assert.Equal(t, limitConfig.OverSubscription, int32(200)) } -//func TestResourceClusterGroupCreate(t *testing.T) { -// m := &client.V1Client{} -// -// d, err := prepareClusterGroupTestData() -// if err != nil { -// t.Errorf(err.Error()) -// } -// ctx := context.Background() -// -// diags := resourceClusterGroupCreate(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// -// 
if d.Id() != "test-uid" { -// t.Errorf("Expected ID to be 'test-uid', got %s", d.Id()) -// } -//} - -//func TestResourceClusterGroupDelete(t *testing.T) { -// testUid := "unit_test_uid" -// m := &client.V1Client{} -// e := m.DeleteClusterGroup(testUid) -// if e != nil { -// t.Errorf("Expectred nil, got %s", e) -// } -//} - -//func TestResourceClusterGroupUpdate(t *testing.T) { -// d, err := prepareClusterGroupTestData() -// if err != nil { -// t.Errorf(err.Error()) -// } -// clusterConfig := []map[string]interface{}{ -// { -// "host_endpoint_type": "LoadBalancer", -// "cpu_millicore": 5000, -// "memory_in_mb": 5096, -// "storage_in_gb": 150, -// "oversubscription_percent": 120, -// }, -// } -// d.Set("config", clusterConfig) -// m := &client.V1Client{} -// ctx := context.Background() -// resourceClusterGroupUpdate(ctx, d, m) -//} - func TestToClusterGroupUpdate(t *testing.T) { // Set up test data clusterRefs := []*models.V1ClusterGroupClusterRef{ @@ -342,3 +271,31 @@ func TestToHostClusterConfigs(t *testing.T) { assert.Equal(t, clusterUid, hostClusterConfigs[0].ClusterUID) assert.Equal(t, hostDns, hostClusterConfigs[0].EndpointConfig.IngressConfig.Host) } + +func TestResourceClusterGroupCreate(t *testing.T) { + d, _ := prepareClusterGroupTestData() + ctx := context.Background() + diags := resourceClusterGroupCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} + +func TestResourceClusterGroupRead(t *testing.T) { + d, _ := prepareClusterGroupTestData() + ctx := context.Background() + diags := resourceClusterGroupRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} + +func TestResourceClusterGroupUpdate(t *testing.T) { + d, _ := prepareClusterGroupTestData() + ctx := context.Background() + diags := resourceClusterGroupUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} + +func TestResourceClusterGroupDelete(t *testing.T) { + d, _ := prepareClusterGroupTestData() + ctx := context.Background() + diags := 
resourceClusterGroupDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) +} diff --git a/spectrocloud/resource_cluster_mass_test.go b/spectrocloud/resource_cluster_mass_test.go index 2776c938..207ec069 100644 --- a/spectrocloud/resource_cluster_mass_test.go +++ b/spectrocloud/resource_cluster_mass_test.go @@ -159,105 +159,3 @@ func TestToMachinePoolMaas(t *testing.T) { t.Fatal("Expected a non-nil result") } } - -//func TestToMaasCluster(t *testing.T) { -// -// mockClient := &client.V1Client{} -// -// d := resourceClusterMaas().TestResourceData() -// d.Set("name", "test_maas_cluster") -// d.Set("context", "tenant") -// d.Set("tags", schema.NewSet(schema.HashString, []interface{}{"tf_tag"})) -// d.Set("cluster_meta_attribute", "zdsdfsdfafs34cada") -// d.Set("cluster_profile", []interface{}{ -// map[string]interface{}{ -// "id": "test_cluster+profile", -// }, -// }) -// d.Set("cloud_account_id", "test_account_uid") -// d.Set("os_patch_on_boot", true) -// d.Set("os_patch_schedule", "0 0 * * *") -// d.Set("cloud_config", []interface{}{ -// map[string]interface{}{ -// "domain": "testccdomain", -// }, -// }) -// mpools := []interface{}{ -// map[string]interface{}{ -// "control_plane": true, -// "name": "mass_mp_cp", -// "count": 2, -// "update_strategy": "RollingUpdateScaleOut", -// "max": 3, -// "additional_labels": map[string]string{ -// "TF": string("test_label"), -// }, -// "control_plane_as_worker": true, -// "min": 2, -// "instance_type": []interface{}{ -// map[string]interface{}{ -// "min_memory_mb": 500, -// "min_cpu": 2, -// }, -// }, -// "azs": []string{"zone1", "zone2"}, -// "node_tags": []string{"test"}, -// "placement": []interface{}{ -// map[string]interface{}{ -// "id": "id_placements", -// "resource_pool": "placement_rp", -// }, -// }, -// }, -// map[string]interface{}{ -// "control_plane": false, -// "name": "mass_mp_worker", -// "count": 2, -// "update_strategy": "RollingUpdateScaleOut", -// "max": 3, -// "additional_labels": 
map[string]string{ -// "TF": string("test_label"), -// }, -// "node_repave_interval": 30, -// "control_plane_as_worker": true, -// "min": 2, -// "instance_type": []interface{}{ -// map[string]interface{}{ -// "min_memory_mb": 500, -// "min_cpu": 2, -// }, -// }, -// "azs": []string{"zone1", "zone2"}, -// "node_tags": []string{"test"}, -// "placement": []interface{}{ -// map[string]interface{}{ -// "id": "id_placements", -// "resource_pool": "placement_rp", -// }, -// }, -// }, -// } -// d.Set("machine_pool", mpools) -// -// result, err := toMaasCluster(mockClient, d) -// -// if err != nil { -// t.Fatalf("Unexpected error: %v", err) -// } -// if result == nil { -// t.Fatal("Expected a non-nil result") -// } -// if d.Get("name") != result.Metadata.Name { -// t.Errorf("Expected %s, got %s", d.Get("name"), result.Metadata.Name) -// } -// if d.Get("cluster_meta_attribute") != result.Spec.ClusterConfig.ClusterMetaAttribute { -// t.Errorf("Expected %s, got %s", d.Get("cluster_meta_attribute"), result.Spec.ClusterConfig.ClusterMetaAttribute) -// } -// if d.Get("cloud_account_id") != *result.Spec.CloudAccountUID { -// t.Errorf("Expected %s, got %s", d.Get("cloud_account_id"), *result.Spec.CloudAccountUID) -// } -// if len(d.Get("machine_pool").(*schema.Set).List()) != len(result.Spec.Machinepoolconfig) { -// t.Errorf("Expected %d, got %d", len(d.Get("machine_pool").(*schema.Set).List()), len(result.Spec.Machinepoolconfig)) -// } -// -//} diff --git a/spectrocloud/resource_cluster_openstack_test.go b/spectrocloud/resource_cluster_openstack_test.go index ba47df58..814caf0c 100644 --- a/spectrocloud/resource_cluster_openstack_test.go +++ b/spectrocloud/resource_cluster_openstack_test.go @@ -20,6 +20,9 @@ func int32Ptr(i int32) *int32 { return &i } +func int64Ptr(i int64) *int64 { + return &i +} func TestToOpenStackCluster(t *testing.T) { // Setup test data d := schema.TestResourceDataRaw(t, resourceClusterOpenStack().Schema, map[string]interface{}{ diff --git 
a/spectrocloud/resource_cluster_profile_import_feature_test.go b/spectrocloud/resource_cluster_profile_import_feature_test.go new file mode 100644 index 00000000..6210d5cd --- /dev/null +++ b/spectrocloud/resource_cluster_profile_import_feature_test.go @@ -0,0 +1,50 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareProfileImportTestdata() *schema.ResourceData { + d := resourceClusterProfileImportFeature().TestResourceData() + _ = d.Set("import_file", "./resource_cluster_profile_import_feature.go") + _ = d.Set("context", "project") + return d +} + +func TestResourceClusterProfileImportFeatureCreate(t *testing.T) { + d := prepareProfileImportTestdata() + var ctx context.Context + diags := resourceClusterProfileImportFeatureCreate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "cluster-profile-import-1", d.Id()) +} + +func TestResourceClusterProfileImportFeatureRead(t *testing.T) { + d := prepareProfileImportTestdata() + var ctx context.Context + d.SetId("cluster-profile-import-1") + diags := resourceClusterProfileImportFeatureRead(ctx, d, unitTestMockAPINegativeClient) + assert.NotEmpty(t, diags) + +} + +func TestResourceClusterProfileImportFeatureUpdate(t *testing.T) { + d := prepareProfileImportTestdata() + var ctx context.Context + d.SetId("cluster-profile-import-1") + diags := resourceClusterProfileImportFeatureUpdate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + +} + +func TestResourceClusterProfileImportFeatureDelete(t *testing.T) { + d := prepareProfileImportTestdata() + var ctx context.Context + d.SetId("cluster-profile-import-1") + diags := resourceClusterProfileImportFeatureDelete(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + +} diff --git a/spectrocloud/resource_cluster_profile_test.go b/spectrocloud/resource_cluster_profile_test.go index af0eb484..f1e1e197 100644 --- 
a/spectrocloud/resource_cluster_profile_test.go +++ b/spectrocloud/resource_cluster_profile_test.go @@ -1,8 +1,11 @@ package spectrocloud import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/gomi/pkg/ptr" "github.com/spectrocloud/palette-sdk-go/api/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/stretchr/testify/assert" "testing" ) @@ -40,9 +43,9 @@ func TestToClusterProfileVariables(t *testing.T) { }, } proVar = append(proVar, variables) - mockResourceData.Set("cloud", "edge-native") - mockResourceData.Set("type", "add-on") - mockResourceData.Set("profile_variables", proVar) + _ = mockResourceData.Set("cloud", "edge-native") + _ = mockResourceData.Set("type", "add-on") + _ = mockResourceData.Set("profile_variables", proVar) result, err := toClusterProfileVariables(mockResourceData) // Assertions for valid profile variables @@ -51,9 +54,9 @@ func TestToClusterProfileVariables(t *testing.T) { // Test case 2: Empty profile variables mockResourceDataEmpty := resourceClusterProfile().TestResourceData() - mockResourceDataEmpty.Set("cloud", "edge-native") - mockResourceDataEmpty.Set("type", "add-on") - mockResourceDataEmpty.Set("profile_variables", []interface{}{map[string]interface{}{}}) + _ = mockResourceDataEmpty.Set("cloud", "edge-native") + _ = mockResourceDataEmpty.Set("type", "add-on") + _ = mockResourceDataEmpty.Set("profile_variables", []interface{}{map[string]interface{}{}}) resultEmpty, errEmpty := toClusterProfileVariables(mockResourceDataEmpty) // Assertions for empty profile variables @@ -62,8 +65,8 @@ func TestToClusterProfileVariables(t *testing.T) { // Test case 3: Invalid profile variables format mockResourceDataInvalid := resourceClusterProfile().TestResourceData() - mockResourceDataInvalid.Set("cloud", "edge-native") - mockResourceDataInvalid.Set("profile_variables", []interface{}{ + _ = mockResourceDataInvalid.Set("cloud", "edge-native") + _ = 
mockResourceDataInvalid.Set("profile_variables", []interface{}{ map[string]interface{}{ "variable": []interface{}{}, // Invalid format, should be a list }, @@ -105,8 +108,8 @@ func TestFlattenProfileVariables(t *testing.T) { }, } proVar = append(proVar, variables) - mockResourceData.Set("cloud", "edge-native") - mockResourceData.Set("profile_variables", proVar) + _ = mockResourceData.Set("cloud", "edge-native") + _ = mockResourceData.Set("profile_variables", proVar) pv := []*models.V1Variable{ {Name: ptr.StringPtr("variable_name_1"), DisplayName: "display_name_1", Description: "description_1", Format: "string", DefaultValue: "default_value_1", Regex: "regex_1", Required: true, Immutable: false, Hidden: false}, @@ -152,8 +155,8 @@ func TestFlattenProfileVariables(t *testing.T) { // Test case 2: Empty profile variables and pv //mockResourceDataEmpty := schema.TestResourceDataRaw(t, resourceClusterProfileVariables().Schema, map[string]interface{}{}) mockResourceDataEmpty := resourceClusterProfile().TestResourceData() - mockResourceDataEmpty.Set("cloud", "edge-native") - mockResourceDataEmpty.Set("profile_variables", []interface{}{map[string]interface{}{}}) + _ = mockResourceDataEmpty.Set("cloud", "edge-native") + _ = mockResourceDataEmpty.Set("profile_variables", []interface{}{map[string]interface{}{}}) resultEmpty, errEmpty := flattenProfileVariables(mockResourceDataEmpty, nil) // Assertions for empty profile variables and pv @@ -195,43 +198,293 @@ func TestToClusterProfileVariablesRestrictionError(t *testing.T) { }, } proVar = append(proVar, variables) - mockResourceData.Set("cloud", "all") - mockResourceData.Set("type", "infra") - mockResourceData.Set("profile_variables", proVar) + _ = mockResourceData.Set("cloud", "all") + _ = mockResourceData.Set("type", "infra") + _ = mockResourceData.Set("profile_variables", proVar) result, err := toClusterProfileVariables(mockResourceData) // Assertions for valid profile variables assert.Error(t, err) assert.Len(t, result, 0) 
- mockResourceData.Set("cloud", "edge-native") - mockResourceData.Set("type", "infra") + _ = mockResourceData.Set("cloud", "edge-native") + _ = mockResourceData.Set("type", "infra") result, err = toClusterProfileVariables(mockResourceData) assert.NoError(t, err) assert.Len(t, result, 2) - mockResourceData.Set("cloud", "aws") - mockResourceData.Set("type", "add-on") + _ = mockResourceData.Set("cloud", "aws") + _ = mockResourceData.Set("type", "add-on") result, err = toClusterProfileVariables(mockResourceData) assert.NoError(t, err) assert.Len(t, result, 2) - mockResourceData.Set("cloud", "all") - mockResourceData.Set("type", "add-on") + _ = mockResourceData.Set("cloud", "all") + _ = mockResourceData.Set("type", "add-on") result, err = toClusterProfileVariables(mockResourceData) assert.NoError(t, err) assert.Len(t, result, 2) - mockResourceData.Set("cloud", "aws") - mockResourceData.Set("type", "infra") + _ = mockResourceData.Set("cloud", "aws") + _ = mockResourceData.Set("type", "infra") result, err = toClusterProfileVariables(mockResourceData) assert.Error(t, err) assert.Len(t, result, 0) - mockResourceData.Set("cloud", "edge-native") - mockResourceData.Set("type", "add-on") + _ = mockResourceData.Set("cloud", "edge-native") + _ = mockResourceData.Set("type", "add-on") result, err = toClusterProfileVariables(mockResourceData) assert.NoError(t, err) assert.Len(t, result, 2) } + +func TestToClusterProfilePackCreate(t *testing.T) { + tests := []struct { + name string + input map[string]interface{} + expectedError string + expectedPack *models.V1PackManifestEntity + }{ + { + name: "Valid Spectro Pack", + input: map[string]interface{}{ + "name": "test-pack", + "type": "spectro", + "tag": "v1.0", + "uid": "test-uid", + "registry_uid": "test-registry-uid", + "values": "test-values", + "manifest": []interface{}{}, + }, + expectedError: "", + expectedPack: &models.V1PackManifestEntity{ + Name: types.Ptr("test-pack"), + Tag: "v1.0", + RegistryUID: "test-registry-uid", + UID: 
"test-uid", + Type: models.V1PackTypeSpectro, + Values: "test-values", + Manifests: []*models.V1ManifestInputEntity{}, + }, + }, + { + name: "Spectro Pack Missing UID", + input: map[string]interface{}{ + "name": "test-pack", + "type": "spectro", + "tag": "v1.0", + "uid": "", + "values": "test-values", + "manifest": []interface{}{}, + }, + expectedError: "pack test-pack needs to specify tag and/or uid", + expectedPack: nil, + }, + { + name: "Valid Manifest Pack with Default UID", + input: map[string]interface{}{ + "name": "test-manifest-pack", + "type": "manifest", + "tag": "", + "uid": "", + "values": "test-values", + "manifest": []interface{}{ + map[string]interface{}{ + "content": "manifest-content", + "name": "manifest-name", + }, + }, + }, + expectedError: "", + expectedPack: &models.V1PackManifestEntity{ + Name: types.Ptr("test-manifest-pack"), + Tag: "", + RegistryUID: "", + UID: "spectro-manifest-pack", + Type: models.V1PackTypeManifest, + Values: "test-values", + Manifests: []*models.V1ManifestInputEntity{ + { + Content: "manifest-content", + Name: "manifest-name", + }, + }, + }, + }, + { + name: "Valid Manifest Pack with Provided UID", + input: map[string]interface{}{ + "name": "test-manifest-pack", + "type": "manifest", + "tag": "", + "uid": "custom-uid", + "values": "test-values", + "manifest": []interface{}{ + map[string]interface{}{ + "content": "manifest-content", + "name": "manifest-name", + }, + }, + }, + expectedError: "", + expectedPack: &models.V1PackManifestEntity{ + Name: types.Ptr("test-manifest-pack"), + Tag: "", + RegistryUID: "", + UID: "custom-uid", + Type: models.V1PackTypeManifest, + Values: "test-values", + Manifests: []*models.V1ManifestInputEntity{ + { + Content: "manifest-content", + Name: "manifest-name", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Call the function under test + actualPack, err := toClusterProfilePackCreate(tt.input) + + // Check for errors + if tt.expectedError 
!= "" { + assert.EqualError(t, err, tt.expectedError) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedPack, actualPack) + } + }) + } +} + +func prepareBaseClusterProfileTestData() *schema.ResourceData { + d := resourceClusterProfile().TestResourceData() + _ = d.Set("context", "project") + _ = d.Set("name", "test-cluster-profile") + _ = d.Set("version", "1.0.0") + _ = d.Set("description", "test unit-test") + _ = d.Set("cloud", "all") + _ = d.Set("type", "cluster") + var variables []interface{} + variables = append(variables, + map[string]interface{}{ + "variable": []interface{}{map[string]interface{}{ + "name": "test_variable", + "display_name": "Test Vat", + "format": "string", + "description": "test var description", + "default_value": "test", + "regex": "*", + "required": false, + "immutable": false, + "is_sensitive": false, + "hidden": false, + }, + }, + }, + ) + _ = d.Set("profile_variables", variables) + _ = d.Set("pack", []interface{}{ + map[string]interface{}{ + "uid": "test-pack-uid-1", + "type": "spectro", + "name": "k8", + "registry_uid": "test-pub-reg-uid", + "tag": "test:test", + "values": "test values", + "manifest": []interface{}{map[string]interface{}{ + "uid": "test-manifest-uid", + "name": "test-manifest", + "content": "value content", + }, + }, + }, + map[string]interface{}{ + "uid": "test-pack-uid-2", + "type": "spectro", + "name": "csi", + "registry_uid": "test-pub-reg-uid", + "tag": "test:test", + "values": "test values", + "manifest": []interface{}{map[string]interface{}{ + "uid": "test-manifest-uid", + "name": "test-manifest", + "content": "value content", + }, + }, + }, + map[string]interface{}{ + "uid": "test-pack-uid-3", + "type": "spectro", + "name": "cni", + "registry_uid": "test-pub-reg-uid", + "tag": "test:test", + "values": "test values", + "manifest": []interface{}{map[string]interface{}{ + "uid": "test-manifest-uid", + "name": "test-manifest", + "content": "value content", + }, + }, + }, + map[string]interface{}{ + 
"uid": "test-pack-uid-4", + "type": "spectro", + "name": "os", + "registry_uid": "test-pub-reg-uid", + "tag": "test:test", + "values": "test values", + "manifest": []interface{}{map[string]interface{}{ + "uid": "test-manifest-uid", + "name": "test-manifest", + "content": "value content", + }, + }, + }, + }) + d.SetId("cluster-profile-1") + return d +} + +func TestResourceClusterProfileCreate(t *testing.T) { + d := prepareBaseClusterProfileTestData() + var ctx context.Context + _ = d.Set("type", "add-on") + diags := resourceClusterProfileCreate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "cluster-profile-1", d.Id()) +} + +func TestResourceClusterProfileCreateError(t *testing.T) { + d := prepareBaseClusterProfileTestData() + var ctx context.Context + diags := resourceClusterProfileCreate(ctx, d, unitTestMockAPIClient) + assert.NotEmpty(t, diags) +} + +func TestResourceClusterProfileRead(t *testing.T) { + d := prepareBaseClusterProfileTestData() + var ctx context.Context + diags := resourceClusterProfileRead(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "cluster-profile-1", d.Id()) +} + +func TestResourceClusterProfileUpdate(t *testing.T) { + d := prepareBaseClusterProfileTestData() + var ctx context.Context + diags := resourceClusterProfileUpdate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "cluster-profile-1", d.Id()) +} + +func TestResourceClusterProfileDelete(t *testing.T) { + d := prepareBaseClusterProfileTestData() + var ctx context.Context + diags := resourceClusterProfileDelete(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} diff --git a/spectrocloud/resource_cluster_tke_test.go b/spectrocloud/resource_cluster_tke_test.go index 24da086c..649fca05 100644 --- a/spectrocloud/resource_cluster_tke_test.go +++ b/spectrocloud/resource_cluster_tke_test.go @@ -142,7 +142,7 @@ func TestToMachinePoolTke(t *testing.T) { MaxSize: 5, IsControlPlane: false, UpdateStrategy: 
&models.V1UpdateStrategy{ - Type: "RollingUpdateScaleOut", // Replace with the result of getUpdateStrategy if necessary + Type: "RollingUpdateScaleOut", }, Taints: []*models.V1Taint{}, // Expected taints if any AdditionalLabels: map[string]string{}, @@ -176,7 +176,7 @@ func TestToMachinePoolTke(t *testing.T) { MaxSize: 3, IsControlPlane: true, UpdateStrategy: &models.V1UpdateStrategy{ - Type: "RollingUpdateScaleOut", // Replace with the result of getUpdateStrategy if necessary + Type: "RollingUpdateScaleOut", }, Taints: []*models.V1Taint{}, // Expected taints if any AdditionalLabels: map[string]string{}, diff --git a/spectrocloud/resource_cluster_virtual_test.go b/spectrocloud/resource_cluster_virtual_test.go index ac13507e..b3fd5393 100644 --- a/spectrocloud/resource_cluster_virtual_test.go +++ b/spectrocloud/resource_cluster_virtual_test.go @@ -40,37 +40,6 @@ func prepareVirtualClusterTestData() *schema.ResourceData { return d } -//func TestToVirtualCluster(t *testing.T) { -// assert := assert.New(t) -// // Create a mock ResourceData object -// d := prepareVirtualClusterTestData() -// -// // Mock the client -// mockClient := &client.V1Client{} -// -// // Create a mock ResourceData for testing -// vCluster, err := toVirtualCluster(mockClient, d) -// assert.Nil(err) -// -// // Check the output against the expected values -// -// // Verifying cluster name attribute -// assert.Equal(d.Get("name").(string), vCluster.Metadata.Name) -// -// // Verifying host cluster uid and cluster group uid attributes -// assert.Equal(d.Get("host_cluster_uid").(string), vCluster.Spec.ClusterConfig.HostClusterConfig.HostCluster.UID) -// assert.Equal(d.Get("cluster_group_uid").(string), vCluster.Spec.ClusterConfig.HostClusterConfig.ClusterGroup.UID) -// -// // Verifying cloud config attributes -// val, _ := d.GetOk("cloud_config") -// cloudConfig := val.([]interface{})[0].(map[string]interface{}) -// assert.Equal(cloudConfig["chart_name"].(string), 
vCluster.Spec.CloudConfig.HelmRelease.Chart.Name) -// assert.Equal(cloudConfig["chart_repo"].(string), vCluster.Spec.CloudConfig.HelmRelease.Chart.Repo) -// assert.Equal(cloudConfig["chart_version"].(string), vCluster.Spec.CloudConfig.HelmRelease.Chart.Version) -// assert.Equal(cloudConfig["chart_values"].(string), vCluster.Spec.CloudConfig.HelmRelease.Values) -// assert.Equal(cloudConfig["k8s_version"].(string), vCluster.Spec.CloudConfig.KubernetesVersion) -//} - func TestToVirtualClusterResize(t *testing.T) { resources := map[string]interface{}{ "max_cpu": 4, diff --git a/spectrocloud/resource_cluster_vsphere_test.go b/spectrocloud/resource_cluster_vsphere_test.go index 76c6a087..6cc47ae7 100644 --- a/spectrocloud/resource_cluster_vsphere_test.go +++ b/spectrocloud/resource_cluster_vsphere_test.go @@ -99,75 +99,6 @@ func prepareClusterVsphereTestData() *schema.ResourceData { return d } -//func TestToVsphereCluster(t *testing.T) { -// assert := assert.New(t) -// // Create a mock ResourceData object -// d := prepareClusterVsphereTestData() -// -// m := &client.V1Client{} -// -// vSphereSchema, err := toVsphereCluster(m, d) -// assert.Nil(err) -// -// // Check the output against the expected values -// // Verifying cluster name attribute -// assert.Equal(d.Get("name"), vSphereSchema.Metadata.Name) -// -// // Verifying cluster name attribute -// assert.Equal("vmware-basic-infra-profile-id", vSphereSchema.Spec.Profiles[0].UID) -// -// // Verifying cluster_meta_attribute attribute -// assert.Equal("{'nic_name': 'test', 'env': 'stage'}", vSphereSchema.Spec.ClusterConfig.ClusterMetaAttribute) -// -// // Verifying account id attribute -// assert.Equal("vmware-basic-account-id", vSphereSchema.Spec.CloudAccountUID) -// -// // Verifying cloud config attributes -// assert.Equal("spectrocloud.dev", vSphereSchema.Spec.CloudConfig.ControlPlaneEndpoint.DdnsSearchDomain) -// assert.Equal("DDNS", vSphereSchema.Spec.CloudConfig.ControlPlaneEndpoint.Type) -// 
assert.Equal("Datacenter", vSphereSchema.Spec.CloudConfig.Placement.Datacenter) -// assert.Equal("sc_test/terraform", vSphereSchema.Spec.CloudConfig.Placement.Folder) -// assert.Equal(2, len(vSphereSchema.Spec.CloudConfig.SSHKeys)) -// assert.Equal(false, vSphereSchema.Spec.CloudConfig.StaticIP) -// -// // Verifying control-plane pool attributes -// assert.Equal(2, len(vSphereSchema.Spec.Machinepoolconfig)) -// cpPoolIndex := 0 -// workerPoolIndex := 1 -// if *vSphereSchema.Spec.Machinepoolconfig[0].PoolConfig.Name == "cp-pool" { -// cpPoolIndex = 0 -// workerPoolIndex = 1 -// } else { -// cpPoolIndex = 1 -// workerPoolIndex = 0 -// } -// -// assert.Equal("cp-pool", *vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].PoolConfig.Name) -// assert.Equal(true, vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].PoolConfig.IsControlPlane) -// assert.Equal(int32(40), *vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.InstanceType.DiskGiB) -// assert.Equal(int64(8192), *vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.InstanceType.MemoryMiB) -// assert.Equal(int32(4), *vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.InstanceType.NumCPUs) -// assert.Equal("test cluster", vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.Placements[0].Cluster) -// assert.Equal("datastore55_2", vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.Placements[0].Datastore) -// assert.Equal("Default", vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.Placements[0].ResourcePool) -// assert.Equal("VM Network", *vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.Placements[0].Network.NetworkName) -// assert.Equal("testpoolid", vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.Placements[0].Network.ParentPoolUID) -// assert.Equal(true, vSphereSchema.Spec.Machinepoolconfig[cpPoolIndex].CloudConfig.Placements[0].Network.StaticIP) -// -// // Verifying Worker pool attributes -// assert.Equal("worker-basic", 
*vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].PoolConfig.Name) -// assert.Equal(false, vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].PoolConfig.IsControlPlane) -// assert.Equal(int32(40), *vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.InstanceType.DiskGiB) -// assert.Equal(int64(8192), *vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.InstanceType.MemoryMiB) -// assert.Equal(int32(4), *vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.InstanceType.NumCPUs) -// assert.Equal("test cluster", vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.Placements[0].Cluster) -// assert.Equal("datastore55_2", vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.Placements[0].Datastore) -// assert.Equal("Default", vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.Placements[0].ResourcePool) -// assert.Equal("VM Network", *vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.Placements[0].Network.NetworkName) -// assert.Equal("testpoolid", vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.Placements[0].Network.ParentPoolUID) -// assert.Equal(true, vSphereSchema.Spec.Machinepoolconfig[workerPoolIndex].CloudConfig.Placements[0].Network.StaticIP) -//} - func TestToCloudConfigUpdate(t *testing.T) { assert := assert.New(t) cloudConfig := map[string]interface{}{ @@ -188,100 +119,6 @@ func TestToCloudConfigUpdate(t *testing.T) { assert.Equal(false, cloudEntity.ClusterConfig.StaticIP) } -//func TestResourceClusterVsphereCreate(t *testing.T) { -// -// // Create a mock ResourceData object -// d := prepareClusterVsphereTestData() -// d.Set("skip_completion", true) -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceClusterVsphereCreate(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// -// if d.Id() != "vsphere-cluster-uid" { -// t.Errorf("Expected ID to be 'test-uid', got %s", 
d.Id()) -// } -//} - -//func TestResourceClusterVsphereCreateError(t *testing.T) { -// -// d := prepareClusterVsphereTestData() -// d.Set("skip_completion", true) -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceClusterVsphereCreate(ctx, d, m) -// if diags[0].Summary != "covering error case" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} -// -//func getClientForCluster() *client.V1Client { -// m := &client.V1Client{} -// return m -//} -//func TestResourceClusterVsphereRead(t *testing.T) { -// // Create a mock ResourceData object -// d := prepareClusterVsphereTestData() -// m := getClientForCluster() -// ctx := context.Background() -// diags := resourceClusterVsphereRead(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} -// -//func TestResourceClusterVsphereReadValidationErrorSpec(t *testing.T) { -// d := prepareClusterVsphereTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceClusterVsphereRead(ctx, d, m) -// if len(diags) == 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// if diags[0].Summary != "cluster spec is nil in cluster mockid123" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} - -//func TestResourceClusterVsphereReadValidationErrorCloudType(t *testing.T) { -// d := prepareClusterVsphereTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceClusterVsphereRead(ctx, d, m) -// if len(diags) == 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// if diags[0].Summary != "resource with id mockid123 is not of type spectrocloud_cluster_vsphere, need to correct resource type" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} - -//func TestResourceClusterVsphereReadNilCluster(t *testing.T) { -// // Create a mock ResourceData object -// d := prepareClusterVsphereTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags 
:= resourceClusterVsphereRead(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// assert.Equal(t, "", d.Id()) -//} - -//func TestResourceClusterVsphereReadError(t *testing.T) { -// // Create a mock ResourceData object -// d := prepareClusterVsphereTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceClusterVsphereRead(ctx, d, m) -// assert.Equal(t, len(diags), 1) -// if diags[0].Summary != "unexpected Error" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} - func getMachinePlacement() []*models.V1VspherePlacementConfig { network := new(string) *network = "test-net" @@ -369,16 +206,6 @@ func getCloudConfig() *models.V1VsphereCloudConfig { return cloudConfig } -//func TestFlattenCloudConfigVsphere(t *testing.T) { -// d := prepareClusterVsphereTestData() -// m := &client.V1Client{} -// diags := flattenCloudConfigVsphere("", d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// -//} - func TestFlattenClusterConfigsVsphere(t *testing.T) { inputCloudConfig := &models.V1VsphereCloudConfig{ Spec: &models.V1VsphereCloudConfigSpec{ @@ -440,18 +267,6 @@ func TestFlattenMachinePoolConfigsVsphereNil(t *testing.T) { } } -//func TestResourceClusterVsphereUpdate(t *testing.T) { -// d := prepareClusterVsphereTestData() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceClusterVsphereUpdate(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// assert.Equal(t, "", d.Id()) -// -//} - func TestFlattenMachinePoolConfigsVsphere(t *testing.T) { // Define test cases testCases := []struct { diff --git a/spectrocloud/resource_filter_test.go b/spectrocloud/resource_filter_test.go new file mode 100644 index 00000000..ffc17e28 --- /dev/null +++ b/spectrocloud/resource_filter_test.go @@ -0,0 +1,68 @@ +package spectrocloud + +import ( + "context" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareBaseFilterTestData() *schema.ResourceData { + d := resourceFilter().TestResourceData() + _ = d.Set("metadata", []interface{}{ + map[string]interface{}{ + "name": "test-filter-name", + }, + }) + _ = d.Set("spec", []interface{}{ + map[string]interface{}{ + "filter_group": []interface{}{ + map[string]interface{}{ + "conjunction": "AND", + "filters": []interface{}{ + map[string]interface{}{ + "key": "test-key", + "negation": false, + "operator": "eq", + "values": []string{"test-value"}, + }, + }, + }, + }, + }, + }) + d.SetId("test-filter-id") + return d +} + +func TestResourceFilterCreate(t *testing.T) { + d := prepareBaseFilterTestData() + var ctx context.Context + diags := resourceFilterCreate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "test-filter-id", d.Id()) +} + +func TestResourceFilterRead(t *testing.T) { + d := prepareBaseFilterTestData() + var ctx context.Context + diags := resourceFilterRead(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "test-filter-id", d.Id()) +} + +func TestResourceFilterUpdate(t *testing.T) { + d := prepareBaseFilterTestData() + var ctx context.Context + diags := resourceFilterUpdate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "test-filter-id", d.Id()) +} + +func TestResourceFilterDelete(t *testing.T) { + d := prepareBaseFilterTestData() + var ctx context.Context + diags := resourceFilterDelete(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + +} diff --git a/spectrocloud/resource_kubevirt_datavolume.go b/spectrocloud/resource_kubevirt_datavolume.go index 0247ce36..d60bee42 100644 --- a/spectrocloud/resource_kubevirt_datavolume.go +++ b/spectrocloud/resource_kubevirt_datavolume.go @@ -170,40 +170,6 @@ func resourceKubevirtDataVolumeDelete(ctx context.Context, d *schema.ResourceDat return diags } -func 
FlattenAddVolumeOptions(addVolumeOptions *models.V1VMAddVolumeOptions) []interface{} { - if addVolumeOptions == nil { - return []interface{}{} - } - - result := map[string]interface{}{ - "name": addVolumeOptions.Name, - } - - if addVolumeOptions.Disk != nil && addVolumeOptions.Disk.Disk != nil { - result["disk"] = []interface{}{ - map[string]interface{}{ - "name": addVolumeOptions.Disk.Name, - "bus": addVolumeOptions.Disk.Disk.Bus, - }, - } - } - - if addVolumeOptions.VolumeSource != nil && addVolumeOptions.VolumeSource.DataVolume != nil { - result["volume_source"] = []interface{}{ - map[string]interface{}{ - "data_volume": []interface{}{ - map[string]interface{}{ - "name": addVolumeOptions.VolumeSource.DataVolume.Name, - "hotpluggable": addVolumeOptions.VolumeSource.DataVolume.Hotpluggable, - }, - }, - }, - } - } - - return []interface{}{result} -} - func ExpandAddVolumeOptions(addVolumeOptions []interface{}) *models.V1VMAddVolumeOptions { if len(addVolumeOptions) == 0 || addVolumeOptions[0] == nil { return nil diff --git a/spectrocloud/resource_macros_test.go b/spectrocloud/resource_macros_test.go index 49320980..42ab3e2d 100644 --- a/spectrocloud/resource_macros_test.go +++ b/spectrocloud/resource_macros_test.go @@ -1,6 +1,8 @@ package spectrocloud import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/stretchr/testify/assert" "testing" @@ -104,105 +106,267 @@ func TestMergeExistingMacros_NoMacros(t *testing.T) { assert.Equal(t, expectedMacros, resultWithoutMacros) } -//func TestResourceMacrosCreate(t *testing.T) { -// // Mock dependencies -// mockResourceData := resourceMacros().TestResourceData() -// mockResourceData.Set("macros", map[string]interface{}{ -// "macro_1": "aaa1", -// }) -// mockResourceData.Set("project", "Default") -// mockClient := &client.V1Client{} -// // Call the function with mocked dependencies -// diags := 
resourceMacrosCreate(context.Background(), mockResourceData, mockClient) -// -// // Assertions -// var expectedDiag diag.Diagnostics -// assert.Equal(t, expectedDiag, diags) -// assert.Equal(t, "project-macros-testUID", mockResourceData.Id()) -//} -// -//func TestResourceMacrosRead(t *testing.T) { -// // Test case 1: Successful read -// mockResourceData := resourceMacros().TestResourceData() -// mockResourceData.SetId("testMacrosId") -// mockResourceData.Set("project", "Default") -// mockResourceData.Set("macros", map[string]interface{}{"macro_1": "value_1"}) -// -// mockClient := &client.V1Client{} -// -// diags := resourceMacrosRead(context.Background(), mockResourceData, mockClient) -// -// // Assertions for successful read -// var expectedDiag diag.Diagnostics -// assert.Equal(t, expectedDiag, diags) -// assert.Equal(t, "testMacrosId", mockResourceData.Id()) -// assert.Equal(t, map[string]interface{}{"macro_1": "value_1", "macro_2": "value_2"}, mockResourceData.Get("macros")) -// -// // Test case 2: Error during read -// mockResourceDataWithError := resourceMacros().TestResourceData() -// mockResourceDataWithError.Set("project", "Default") -// mockResourceDataWithError.Set("macros", map[string]interface{}{"macro_1": "value_1"}) -// -// mockClientWithError := &client.V1Client{} -// -// diagsWithError := resourceMacrosRead(context.Background(), mockResourceDataWithError, mockClientWithError) -// -// // Assertions for error case -// assert.Equal(t, "failed to get project UID", diagsWithError[0].Summary) -// assert.Equal(t, "", mockResourceDataWithError.Id()) // ID should not be set on error -// -//} -// -//func TestResourceMacrosUpdate(t *testing.T) { -// // Test case 1: Successful update -// mockResourceData := resourceMacros().TestResourceData() -// mockResourceData.Set("project", "Default") -// mockResourceData.Set("macros", map[string]interface{}{"macro_1": "value_1"}) -// -// mockClient := &client.V1Client{} -// -// diags := 
resourceMacrosUpdate(context.Background(), mockResourceData, mockClient) -// -// // Assertions for successful update -// var expectedDiag diag.Diagnostics -// assert.Equal(t, expectedDiag, diags) -// -// // Test case 2: Error during update -// mockResourceDataWithError := resourceMacros().TestResourceData() -// mockResourceDataWithError.Set("project", "Default") -// mockResourceDataWithError.Set("macros", map[string]interface{}{"macro_1": "value_1"}) -// -// mockClientWithError := &client.V1Client{} -// -// diagsWithError := resourceMacrosUpdate(context.Background(), mockResourceDataWithError, mockClientWithError) -// -// // Assertions for error case -// assert.Equal(t, "failed to get project UID", diagsWithError[0].Summary) -//} -// -//func TestResourceMacrosDelete(t *testing.T) { -// // Test case 1: Successful deletion -// mockResourceData := resourceMacros().TestResourceData() -// mockResourceData.Set("project", "Default") -// mockResourceData.Set("macros", map[string]interface{}{"macro_1": "value_1"}) -// -// mockClient := &client.V1Client{} -// -// diags := resourceMacrosDelete(context.Background(), mockResourceData, mockClient) -// -// // Assertions for successful deletion -// var expectedDiag diag.Diagnostics -// assert.Equal(t, expectedDiag, diags) -// -// // Test case 2: Error during deletion -// mockResourceDataWithError := resourceMacros().TestResourceData() -// mockResourceDataWithError.Set("project", "Default") -// mockResourceDataWithError.Set("macros", map[string]interface{}{"macro_1": "value_1"}) -// -// mockClientWithError := &client.V1Client{} -// -// diagsWithError := resourceMacrosDelete(context.Background(), mockResourceDataWithError, mockClientWithError) -// -// // Assertions for error case -// assert.Equal(t, "failed to get project UID", diagsWithError[0].Summary) -//} +func prepareBaseTenantMacrosSchema() *schema.ResourceData { + // Get an initialized ResourceData from resourceMacros + d := resourceMacros().TestResourceData() + + // Set 
values for the macros and project fields + err := d.Set("macros", map[string]interface{}{ + "macro1": "value1", + "macro2": "value2", + }) + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + return d +} + +func prepareBaseProjectMacrosSchema() *schema.ResourceData { + // Get an initialized ResourceData from resourceMacros + d := resourceMacros().TestResourceData() + + // Set values for the macros and project fields + err := d.Set("macros", map[string]interface{}{ + "macro1": "value1", + "macro2": "value2", + }) + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + + err = d.Set("project", "Default") + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + return d +} + +func TestResourceProjectMacrosCreate(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + + // Call the function + diags := resourceMacrosCreate(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) + +} + +func TestResourceTenantMacrosCreate(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + + // Call the function + diags := resourceMacrosCreate(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) + +} + +func TestResourceProjectMacrosRead(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + + // Call the function + diags := resourceMacrosRead(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) + +} + +func TestResourceTenantMacrosRead(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + + // Call the function + diags := resourceMacrosRead(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) + +} + +func TestResourceProjectMacrosUpdate(t *testing.T) { + 
ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + // Set values for the macros update + err := resourceData.Set("macros", map[string]interface{}{ + "macro1": "value12", + "macro2": "value23", + }) + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + + // Call the function + diags := resourceMacrosUpdate(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) + +} + +func TestResourceTenantMacrosUpdate(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + // Set values for the macros update + err := resourceData.Set("macros", map[string]interface{}{ + "macro1": "value12", + "macro2": "value23", + }) + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + + // Call the function + diags := resourceMacrosUpdate(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) +} + +func TestResourceProjectMacrosDelete(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + + // Call the function + diags := resourceMacrosDelete(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) + +} + +func TestResourceTenantMacrosDelete(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + + // Call the function + diags := resourceMacrosDelete(ctx, resourceData, unitTestMockAPIClient) + + // Assertions + assert.Equal(t, 0, len(diags)) +} + +func TestResourceProjectMacrosCreateNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() // Assuming this prepares the schema data correctly + + // Call the function + diags := resourceMacrosCreate(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + if assert.NotEmpty(t, diags) { // Check that diags is not empty + assert.Contains(t, 
diags[0].Summary, "Macro already exists") // Verify the error message + } +} + +func TestResourceTenantMacrosCreateNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + + // Call the function + diags := resourceMacrosCreate(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + if assert.NotEmpty(t, diags) { // Check that diags is not empty + assert.Contains(t, diags[0].Summary, "Macro already exists") // Verify the error message + } +} + +func TestResourceProjectMacrosReadNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + + // Call the function + diags := resourceMacrosRead(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + if assert.NotEmpty(t, diags) { // Check that diags is not empty + assert.Contains(t, diags[0].Summary, "Macro not found") // Verify the error message + } + +} + +func TestResourceTenantMacrosReadNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + + // Call the function + diags := resourceMacrosRead(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + if assert.NotEmpty(t, diags) { // Check that diags is not empty + assert.Contains(t, diags[0].Summary, "Macro not found") // Verify the error message + } + +} + +func TestResourceProjectMacrosUpdateNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + // Set values for the macros update + err := resourceData.Set("macros", map[string]interface{}{ + "macro1": "value12", + "macro2": "value23", + }) + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + + // Call the function + diags := resourceMacrosUpdate(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + assert.Empty(t, diags) + +} + +func TestResourceTenantMacrosUpdateNegative(t *testing.T) { + ctx := context.Background() + 
resourceData := prepareBaseTenantMacrosSchema() + // Set values for the macros update + err := resourceData.Set("macros", map[string]interface{}{ + "macro1": "value12", + "macro2": "value23", + }) + if err != nil { + panic(err) // Handle the error as appropriate in your test setup + } + + // Call the function + diags := resourceMacrosUpdate(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + assert.Empty(t, diags) +} + +func TestResourceProjectMacrosDeleteNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseProjectMacrosSchema() + + // Call the function + diags := resourceMacrosDelete(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + if assert.NotEmpty(t, diags) { // Check that diags is not empty + assert.Contains(t, diags[0].Summary, "Macro not found") // Verify the error message + } + +} + +func TestResourceTenantMacrosDeleteNegative(t *testing.T) { + ctx := context.Background() + resourceData := prepareBaseTenantMacrosSchema() + + // Call the function + diags := resourceMacrosDelete(ctx, resourceData, unitTestMockAPINegativeClient) + + // Assertions + if assert.NotEmpty(t, diags) { // Check that diags is not empty + assert.Contains(t, diags[0].Summary, "Macro not found") // Verify the error message + } +} diff --git a/spectrocloud/resource_pcg_ippool_test.go b/spectrocloud/resource_pcg_ippool_test.go index 6e8d45b1..2f4f0fe9 100644 --- a/spectrocloud/resource_pcg_ippool_test.go +++ b/spectrocloud/resource_pcg_ippool_test.go @@ -1,6 +1,9 @@ package spectrocloud import ( + "context" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/stretchr/testify/assert" @@ -95,3 +98,110 @@ func TestToIpPool(t *testing.T) { }) } } + +func prepareResourcePrivateCloudGatewayIpPool() *schema.ResourceData { + d := 
resourcePrivateCloudGatewayIpPool().TestResourceData() + d.SetId("test-pcg-id") + _ = d.Set("name", "test-ippool") + _ = d.Set("private_cloud_gateway_id", "test-pcg-id") + _ = d.Set("network_type", "subnet") + _ = d.Set("ip_start_range", "121.0.0.1") + _ = d.Set("ip_end_range", "121.0.0.100") + _ = d.Set("subnet_cidr", "test-subnet-cidr") + _ = d.Set("prefix", 0) + _ = d.Set("gateway", "test-gateway") + _ = d.Set("nameserver_addresses", []string{"test.test.cm"}) + _ = d.Set("nameserver_search_suffix", []string{"test-suffix"}) + _ = d.Set("restrict_to_single_cluster", false) + return d +} + +func TestResourceIpPoolCreate(t *testing.T) { + d := prepareResourcePrivateCloudGatewayIpPool() + ctx := context.Background() + diags := resourceIpPoolCreate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-id", d.Id()) +} + +func TestResourceIpPoolRead(t *testing.T) { + d := prepareResourcePrivateCloudGatewayIpPool() + ctx := context.Background() + diags := resourceIpPoolRead(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-id", d.Id()) +} + +func TestResourceIpPoolReadRange(t *testing.T) { + d := prepareResourcePrivateCloudGatewayIpPool() + ctx := context.Background() + diags := resourceIpPoolRead(ctx, d, unitTestMockAPIClient) + _ = d.Set("network_type", "range") + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-id", d.Id()) +} + +func TestResourceIpPoolUpdate(t *testing.T) { + d := prepareResourcePrivateCloudGatewayIpPool() + ctx := context.Background() + diags := resourceIpPoolUpdate(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-id", d.Id()) +} + +func TestResourceIpPoolDelete(t *testing.T) { + d := prepareResourcePrivateCloudGatewayIpPool() + ctx := context.Background() + diags := resourceIpPoolDelete(ctx, d, unitTestMockAPIClient) + assert.Len(t, diags, 0) + assert.Equal(t, "test-pcg-id", d.Id()) +} + +func TestValidateNetworkType(t *testing.T) { + tests := 
[]struct { + name string + input interface{} + expectedDiags diag.Diagnostics + expectedError bool + expectedErrMsg string + }{ + { + name: "Valid network type - range", + input: "range", + expectedDiags: diag.Diagnostics{}, + expectedError: false, + }, + { + name: "Valid network type - subnet", + input: "subnet", + expectedDiags: diag.Diagnostics{}, + expectedError: false, + }, + { + name: "Invalid network type - random", + input: "random", + expectedError: true, + expectedErrMsg: "network type 'random' is invalid. valid network types are 'range' and 'subnet'", + }, + { + name: "Invalid network type - empty string", + input: "", + expectedError: true, + expectedErrMsg: "network type '' is invalid. valid network types are 'range' and 'subnet'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diags := validateNetworkType(tt.input, cty.Path{}) + + if tt.expectedError { + assert.NotEmpty(t, diags) + assert.Equal(t, diag.Error, diags[0].Severity) + assert.Equal(t, tt.expectedErrMsg, diags[0].Summary) + } else { + assert.Empty(t, diags) + } + }) + } +} diff --git a/spectrocloud/resource_project_test.go b/spectrocloud/resource_project_test.go index e0212b6e..0706cd9b 100644 --- a/spectrocloud/resource_project_test.go +++ b/spectrocloud/resource_project_test.go @@ -1,6 +1,8 @@ package spectrocloud import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -8,6 +10,16 @@ import ( "github.com/stretchr/testify/assert" ) +func prepareBaseProjectSchema() *schema.ResourceData { + d := resourceProject().TestResourceData() + d.SetId("test123") + err := d.Set("name", "Default") + if err != nil { + return nil + } + return d +} + // TestToProject tests the toProject function func TestToProject(t *testing.T) { tests := []struct { @@ -81,3 +93,91 @@ func TestToProject(t *testing.T) { }) } } + +func TestCreateProjectFunc(t *testing.T) { + d := 
prepareBaseProjectSchema() + var diags diag.Diagnostics + err := d.Set("name", "dev") + if err != nil { + return + } + var ctx context.Context + diags = resourceProjectCreate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestReadProjectFunc(t *testing.T) { + d := resourceProject().TestResourceData() + var diags diag.Diagnostics + d.SetId("test123") + + var ctx context.Context + diags = resourceProjectRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestResourceProjectUpdate(t *testing.T) { + // Prepare the schema data for the test. + d := prepareBaseProjectSchema() + // Call the function you want to test. + ctx := context.Background() + diags := resourceProjectUpdate(ctx, d, unitTestMockAPIClient) + // Assert that no diagnostics were returned (i.e., no errors). + assert.Empty(t, diags) +} + +func TestResourceProjectDelete(t *testing.T) { + // Prepare the schema data for the test. + d := prepareBaseProjectSchema() + // Call the function you want to test. + ctx := context.Background() + diags := resourceProjectDelete(ctx, d, unitTestMockAPIClient) + // Assert that no diagnostics were returned (i.e., no errors). 
+ assert.Empty(t, diags) +} + +// Negative case's + +func TestCreateProjectNegativeFunc(t *testing.T) { + d := prepareBaseProjectSchema() + var diags diag.Diagnostics + err := d.Set("name", "dev") + if err != nil { + return + } + var ctx context.Context + diags = resourceProjectCreate(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Project already exist") +} + +func TestReadProjectNegativeFunc(t *testing.T) { + d := resourceProject().TestResourceData() + var diags diag.Diagnostics + d.SetId("test123") + + var ctx context.Context + diags = resourceProjectRead(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Project not found") +} + +func TestUpdateProjectNegativeFunc(t *testing.T) { + d := prepareBaseProjectSchema() + var diags diag.Diagnostics + err := d.Set("name", "dev") + if err != nil { + return + } + var ctx context.Context + diags = resourceProjectUpdate(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Operation not allowed") + +} + +func TestResourceProjectInvalidDelete(t *testing.T) { + // Prepare the schema data for the test. + d := prepareBaseProjectSchema() + ctx := context.Background() + // Call the function you want to test. 
+ diags := resourceProjectDelete(ctx, d, unitTestMockAPINegativeClient) + assertFirstDiagMessage(t, diags, "Project not found") +} diff --git a/spectrocloud/resource_registry_helm_test.go b/spectrocloud/resource_registry_helm_test.go new file mode 100644 index 00000000..7620eea7 --- /dev/null +++ b/spectrocloud/resource_registry_helm_test.go @@ -0,0 +1,90 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func prepareResourceRegistryHelm() *schema.ResourceData { + d := resourceRegistryHelm().TestResourceData() + d.SetId("test-reg-id") + _ = d.Set("name", "test-reg-name") + _ = d.Set("is_private", true) + _ = d.Set("endpoint", "test.com") + var cred []interface{} + cred = append(cred, map[string]interface{}{ + "credential_type": "token", + "username": "test-username", + "password": "test-password", + "token": "test_token", + }) + _ = d.Set("credentials", cred) + return d +} + +func TestResourceRegistryHelmCreate(t *testing.T) { + d := prepareResourceRegistryHelm() + var diags diag.Diagnostics + var ctx context.Context + diags = resourceRegistryHelmCreate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestResourceRegistryHelmCreateNoAuth(t *testing.T) { + d := prepareResourceRegistryHelm() + var diags diag.Diagnostics + var ctx context.Context + var cred []interface{} + cred = append(cred, map[string]interface{}{ + "credential_type": "noAuth", + "username": "test-username", + "password": "test-password", + "token": "test_token", + }) + _ = d.Set("credentials", cred) + diags = resourceRegistryHelmCreate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestResourceRegistryHelmCreateBasic(t *testing.T) { + d := prepareResourceRegistryHelm() + var diags diag.Diagnostics + var ctx context.Context + var cred []interface{} + cred = append(cred, 
map[string]interface{}{ + "credential_type": "basic", + "username": "test-username", + "password": "test-password", + "token": "test_token", + }) + _ = d.Set("credentials", cred) + diags = resourceRegistryHelmCreate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestResourceRegistryHelmRead(t *testing.T) { + d := prepareResourceRegistryHelm() + var diags diag.Diagnostics + var ctx context.Context + diags = resourceRegistryHelmRead(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestResourceRegistryHelmUpdate(t *testing.T) { + d := prepareResourceRegistryHelm() + var diags diag.Diagnostics + var ctx context.Context + diags = resourceRegistryHelmUpdate(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} + +func TestResourceRegistryHelmDelete(t *testing.T) { + d := prepareResourceRegistryHelm() + var diags diag.Diagnostics + var ctx context.Context + diags = resourceRegistryHelmDelete(ctx, d, unitTestMockAPIClient) + assert.Equal(t, 0, len(diags)) +} diff --git a/spectrocloud/resource_registry_oci_ecr_test.go b/spectrocloud/resource_registry_oci_ecr_test.go index bdfb9c2c..08dea18c 100644 --- a/spectrocloud/resource_registry_oci_ecr_test.go +++ b/spectrocloud/resource_registry_oci_ecr_test.go @@ -1,15 +1,18 @@ package spectrocloud import ( + "context" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" ) func prepareOciEcrRegistryTestDataSTS() *schema.ResourceData { d := resourceRegistryOciEcr().TestResourceData() - d.Set("name", "testSTSRegistry") - d.Set("type", "ecr") - d.Set("endpoint", "123456.dkr.ecr.us-west-1.amazonaws.com") - d.Set("is_private", true) + _ = d.Set("name", "testSTSRegistry") + _ = d.Set("type", "ecr") + _ = d.Set("endpoint", "123456.dkr.ecr.us-west-1.amazonaws.com") + _ = d.Set("is_private", true) var credential []map[string]interface{} cred := map[string]interface{}{ "credential_type": "sts", @@ -17,16 +20,16 @@ func 
prepareOciEcrRegistryTestDataSTS() *schema.ResourceData { "external_id": "sasdofiwhgowbsrgiornM=", } credential = append(credential, cred) - d.Set("credentials", credential) + _ = d.Set("credentials", credential) return d } func prepareOciEcrRegistryTestDataSecret() *schema.ResourceData { d := resourceRegistryOciEcr().TestResourceData() - d.Set("name", "testSecretRegistry") - d.Set("type", "ecr") - d.Set("endpoint", "123456.dkr.ecr.us-west-1.amazonaws.com") - d.Set("is_private", true) + _ = d.Set("name", "testSecretRegistry") + _ = d.Set("type", "ecr") + _ = d.Set("endpoint", "123456.dkr.ecr.us-west-1.amazonaws.com") + _ = d.Set("is_private", true) var credential []map[string]interface{} cred := map[string]interface{}{ "credential_type": "secret", @@ -38,192 +41,40 @@ func prepareOciEcrRegistryTestDataSecret() *schema.ResourceData { return d } -//func TestResourceRegistryEcrCreateSTS(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrCreate(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// if d.Id() != "test-sts-oci-reg-ecr-uid" { -// t.Errorf("Expected ID to be 'test-sts-oci-reg-ecr-uid', got %s", d.Id()) -// } -//} +func TestResourceRegistryEcrCreateSTS(t *testing.T) { + d := prepareOciEcrRegistryTestDataSTS() + ctx := context.Background() + diags := resourceRegistryEcrCreate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} -//func TestResourceRegistryEcrCreateSecret(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSecret() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrCreate(ctx, d, m) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// if d.Id() != "test-secret-oci-reg-ecr-uid" { -// t.Errorf("Expected ID to be 'test-secret-oci-reg-ecr-uid', got %s", d.Id()) -// } -//} -// -//func TestResourceRegistryEcrCreateErr(t 
*testing.T) { -// d := prepareOciEcrRegistryTestDataSecret() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrCreate(ctx, d, m) -// if diags[0].Summary != "covering error case" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} -// -//func TestResourceRegistryEcrReadSecret(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// d.SetId("test-reg-oci") -// -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrRead(ctx, d, m) -// cre := d.Get("credentials") -// assert.Equal(t, "secret", cre.([]interface{})[0].(map[string]interface{})["credential_type"]) -// assert.Equal(t, "ASDSDFRVDSVXCVSGDFGfd", cre.([]interface{})[0].(map[string]interface{})["access_key"]) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// if d.Id() != "test-reg-oci" { -// t.Errorf("Expected ID to be 'test-reg-oci', got %s", d.Id()) -// } -//} -// -//func TestResourceRegistryEcrReadSTS(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// d.SetId("test-reg-oci") -// -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrRead(ctx, d, m) -// cre := d.Get("credentials") -// assert.Equal(t, "sts", cre.([]interface{})[0].(map[string]interface{})["credential_type"]) -// assert.Equal(t, "testARN", cre.([]interface{})[0].(map[string]interface{})["arn"]) -// assert.Equal(t, "testExternalID", cre.([]interface{})[0].(map[string]interface{})["external_id"]) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -// if d.Id() != "test-reg-oci" { -// t.Errorf("Expected ID to be 'test-reg-oci', got %s", d.Id()) -// } -//} -// -//func TestResourceRegistryEcrReadErr(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrRead(ctx, d, m) -// if diags[0].Summary != "Registry type sts-wrong-type not implemented." 
{ -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} -// -//func TestResourceRegistryEcrReadNil(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrRead(ctx, d, m) -// if diags[0].Summary != "covering error case" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} -//func TestResourceRegistryEcrReadRegistryNil(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// m := &client.V1Client{} -// ctx := context.Background() -// resourceRegistryEcrRead(ctx, d, m) -// assert.Equal(t, "", d.Id()) -//} -// -//func TestResourceRegistryEcrUpdate(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrUpdate(ctx, d, m) -// assert.Equal(t, "", d.Id()) -// if len(diags) > 0 { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} -// -//func TestResourceRegistryEcrDelete(t *testing.T) { -// testCases := []struct { -// name string -// expectedReturnedUID string -// expectedReturnedDiags diag.Diagnostics -// expectedError error -// mock *mock.ClusterClientMock -// }{ -// { -// name: "EcrDelete", -// expectedReturnedUID: "", -// expectedReturnedDiags: diag.Diagnostics{}, -// expectedError: nil, -// mock: &mock.ClusterClientMock{ -// DeleteEcrRegistryErr: nil, -// }, -// }, -// { -// name: "EcrDeleteErr", -// expectedReturnedUID: "", -// expectedReturnedDiags: diag.FromErr(errors.New("covering error case")), -// expectedError: errors.New("covering error case"), -// mock: &mock.ClusterClientMock{ -// DeleteEcrRegistryErr: errors.New("covering error case"), -// }, -// }, -// } -// -// for _, tc := range testCases { -// t.Run(tc.name, func(t *testing.T) { -// -// d := prepareOciEcrRegistryTestDataSTS() -// -// h := &client.V1Client{} -// -// ctx := context.Background() -// diags := resourceRegistryEcrDelete(ctx, d, h) -// assert.Equal(t, "", d.Id()) -// 
-// if len(diags) != len(tc.expectedReturnedDiags) { -// t.Fail() -// t.Logf("Expected diags count: %v", len(tc.expectedReturnedDiags)) -// t.Logf("Actual diags count: %v", len(diags)) -// } else { -// for i := range diags { -// if diags[i].Severity != tc.expectedReturnedDiags[i].Severity { -// t.Fail() -// t.Logf("Expected severity: %v", tc.expectedReturnedDiags[i].Severity) -// t.Logf("Actual severity: %v", diags[i].Severity) -// } -// if diags[i].Summary != tc.expectedReturnedDiags[i].Summary { -// t.Fail() -// t.Logf("Expected summary: %v", tc.expectedReturnedDiags[i].Summary) -// t.Logf("Actual summary: %v", diags[i].Summary) -// } -// if diags[i].Detail != tc.expectedReturnedDiags[i].Detail { -// t.Fail() -// t.Logf("Expected detail: %v", tc.expectedReturnedDiags[i].Detail) -// t.Logf("Actual detail: %v", diags[i].Detail) -// } -// } -// } -// }) -// } -// -//} -// -//func TestResourceRegistryEcrUpdateErr(t *testing.T) { -// d := prepareOciEcrRegistryTestDataSTS() -// m := &client.V1Client{} -// ctx := context.Background() -// diags := resourceRegistryEcrUpdate(ctx, d, m) -// assert.Equal(t, "", d.Id()) -// if diags[0].Summary != "covering error case" { -// t.Errorf("Unexpected diagnostics: %#v", diags) -// } -//} +func TestResourceRegistryEcrCreateSecret(t *testing.T) { + d := prepareOciEcrRegistryTestDataSecret() + ctx := context.Background() + diags := resourceRegistryEcrCreate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestResourceRegistryEcrRead(t *testing.T) { + d := prepareOciEcrRegistryTestDataSTS() + ctx := context.Background() + d.SetId("test-id") + diags := resourceRegistryEcrRead(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestResourceRegistryEcrUpdate(t *testing.T) { + d := prepareOciEcrRegistryTestDataSTS() + ctx := context.Background() + d.SetId("test-id") + diags := resourceRegistryEcrUpdate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} + +func TestResourceRegistryEcrDelete(t 
*testing.T) { + d := prepareOciEcrRegistryTestDataSTS() + ctx := context.Background() + d.SetId("test-id") + diags := resourceRegistryEcrDelete(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} diff --git a/spectrocloud/resource_team_test.go b/spectrocloud/resource_team_test.go index e94f5838..aa6a14c1 100644 --- a/spectrocloud/resource_team_test.go +++ b/spectrocloud/resource_team_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/stretchr/testify/assert" @@ -288,3 +289,66 @@ func TestToTeamWorkspaceRoleMapping(t *testing.T) { }) } } + +func prepareBaseTeamTestdata() *schema.ResourceData { + d := resourceTeam().TestResourceData() + _ = d.Set("name", "test-team") + _ = d.Set("project_role_mapping", []interface{}{ + map[string]interface{}{ + "id": "test-role-id", + "roles": []string{ + "projectAdminTest", + }, + }, + }) + _ = d.Set("tenant_role_mapping", []string{"test-1"}) + _ = d.Set("workspace_role_mapping", []interface{}{ + map[string]interface{}{ + "id": "test-workspace-mapping-id", + "workspace": []interface{}{ + map[string]interface{}{ + "id": "tw-id", + "roles": []string{ + "ws-test-admin", + }, + }, + }, + }, + }) + return d +} + +func TestResourceTeamCreate(t *testing.T) { + d := prepareBaseTeamTestdata() + var ctx context.Context + diags := resourceTeamCreate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "team-123", d.Id()) +} + +func TestResourceTeamRead(t *testing.T) { + d := prepareBaseTeamTestdata() + d.SetId("team-123") + var ctx context.Context + diags := resourceTeamRead(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + assert.Equal(t, "team-123", d.Id()) +} + +func TestResourceTeamUpdate(t *testing.T) { + d := prepareBaseTeamTestdata() + d.SetId("team-123") + var ctx context.Context + diags := resourceTeamUpdate(ctx, d, unitTestMockAPIClient) + assert.Empty(t, 
diags) + assert.Equal(t, "team-123", d.Id()) +} + +func TestResourceTeamDelete(t *testing.T) { + d := prepareBaseTeamTestdata() + d.SetId("team-123") + var ctx context.Context + diags := resourceTeamDelete(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) + +} diff --git a/spectrocloud/resource_workspace_test.go b/spectrocloud/resource_workspace_test.go index 41c72d41..c398fcb8 100644 --- a/spectrocloud/resource_workspace_test.go +++ b/spectrocloud/resource_workspace_test.go @@ -1,6 +1,7 @@ package spectrocloud import ( + "context" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -103,3 +104,62 @@ func TestToWorkspace(t *testing.T) { }) } } + +func prepareResourceWorkspace() *schema.ResourceData { + d := resourceWorkspace().TestResourceData() + d.SetId("test-ws-id") + _ = d.Set("name", "test-ws") + _ = d.Set("tags", []string{"dev:test"}) + _ = d.Set("description", "test description") + var c []interface{} + c = append(c, map[string]interface{}{ + "uid": "test-cluster-id", + }) + var bp []interface{} + bp = append(bp, map[string]interface{}{ + "prefix": "test-prefix", + "backup_location_id": "test-location-id", + "schedule": "0 1 * * *", + "expiry_in_hour": 1, + "include_disks": false, + "include_cluster_resources": true, + "namespaces": []string{"ns1", "ns2"}, + "cluster_uids": []string{"cluster1", "cluster2"}, + "include_all_clusters": false, + }) + _ = d.Set("backup_policy", bp) + var subjects []interface{} + subjects = append(subjects, map[string]interface{}{ + "type": "User", + "name": "test-name-user", + "namespace": "ns1", + }) + var rbacs []interface{} + rbacs = append(rbacs, map[string]interface{}{ + "type": "RoleBinding", + "namespace": "ns1", + "role": map[string]string{ + "test": "admin", + }, + "subjects": subjects, + }) + _ = d.Set("cluster_rbac_binding", rbacs) + var ns []interface{} + ns = append(ns, map[string]interface{}{ + "name": "test-ns-name", + "resource_allocation": map[string]string{ + "test": "test", + }, + 
"images_blacklist": []string{"test-list"}, + }) + _ = d.Set("namespaces", ns) + + return d +} + +func TestResourceWorkspaceDelete(t *testing.T) { + d := prepareResourceWorkspace() + var ctx context.Context + diags := resourceWorkspaceDelete(ctx, d, unitTestMockAPIClient) + assert.Empty(t, diags) +} diff --git a/spectrocloud/schema/testcommon.go b/spectrocloud/schema/testcommon.go deleted file mode 100644 index d8c23bd0..00000000 --- a/spectrocloud/schema/testcommon.go +++ /dev/null @@ -1,14 +0,0 @@ -package schema - -import "testing" - -func CompareErrors(t *testing.T, actual error, expected error) { - if actual != nil && expected != nil { - if actual.Error() != expected.Error() { - t.Errorf("Unexpected error: %v, expected: %v", actual.Error(), expected.Error()) - } - } - if (actual == nil && expected != nil) || (actual != nil && expected == nil) { - t.Errorf("One of errors is nil while another is not") - } -} diff --git a/spectrocloud/schemas/schemas_test.go b/spectrocloud/schemas/schemas_test.go new file mode 100644 index 00000000..c640cd7e --- /dev/null +++ b/spectrocloud/schemas/schemas_test.go @@ -0,0 +1,389 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAppPackSchema(t *testing.T) { + s := AppPackSchema() + + assert.Equal(t, schema.TypeList, s.Type) + assert.Equal(t, true, s.Required) + assert.Equal(t, "A list of packs to be applied to the application profile.", s.Description) + + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["type"].Type) + assert.Equal(t, true, elemSchema.Schema["type"].Optional) + assert.Equal(t, "The type of Pack. 
Allowed values are `container`, `helm`, `manifest`, or `operator-instance`.", elemSchema.Schema["type"].Description) + assert.Equal(t, "spectro", elemSchema.Schema["type"].Default) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["source_app_tier"].Type) + assert.Equal(t, true, elemSchema.Schema["source_app_tier"].Optional) + assert.Equal(t, "The unique id of the pack to be used as the source for the pack.", elemSchema.Schema["source_app_tier"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["registry_uid"].Type) + assert.Equal(t, true, elemSchema.Schema["registry_uid"].Optional) + assert.Equal(t, true, elemSchema.Schema["registry_uid"].Computed) + assert.Equal(t, "The unique id of the registry to be used for the pack.", elemSchema.Schema["registry_uid"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["uid"].Type) + assert.Equal(t, true, elemSchema.Schema["uid"].Optional) + assert.Equal(t, true, elemSchema.Schema["uid"].Computed) + assert.Equal(t, "The unique id of the pack. This is a computed field and is not required to be set.", elemSchema.Schema["uid"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["name"].Type) + assert.Equal(t, true, elemSchema.Schema["name"].Required) + assert.Equal(t, "The name of the specified pack.", elemSchema.Schema["name"].Description) + + assert.Equal(t, schema.TypeMap, elemSchema.Schema["properties"].Type) + assert.Equal(t, true, elemSchema.Schema["properties"].Optional) + assert.Equal(t, "The various properties required by different database tiers eg: `databaseName` and `databaseVolumeSize` size for Redis etc.", elemSchema.Schema["properties"].Description) + + assert.Equal(t, schema.TypeInt, elemSchema.Schema["install_order"].Type) + assert.Equal(t, true, elemSchema.Schema["install_order"].Optional) + assert.Equal(t, 0, elemSchema.Schema["install_order"].Default) + assert.Equal(t, "The installation priority order of the app profile. 
The order of priority goes from lowest number to highest number. For example, a value of `-3` would be installed before an app profile with a higher number value. No upper and lower limits exist, and you may specify positive and negative integers. The default value is `0`. ", elemSchema.Schema["install_order"].Description) + + assert.Equal(t, schema.TypeList, elemSchema.Schema["manifest"].Type) + assert.Equal(t, true, elemSchema.Schema["manifest"].Optional) + assert.Equal(t, "The manifest of the pack.", elemSchema.Schema["manifest"].Description) + + manifestElemSchema, ok := elemSchema.Schema["manifest"].Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, manifestElemSchema) + + assert.Equal(t, schema.TypeString, manifestElemSchema.Schema["uid"].Type) + assert.Equal(t, true, manifestElemSchema.Schema["uid"].Computed) + + assert.Equal(t, schema.TypeString, manifestElemSchema.Schema["name"].Type) + assert.Equal(t, true, manifestElemSchema.Schema["name"].Required) + assert.Equal(t, "The name of the manifest.", manifestElemSchema.Schema["name"].Description) + + assert.Equal(t, schema.TypeString, manifestElemSchema.Schema["content"].Type) + assert.Equal(t, true, manifestElemSchema.Schema["content"].Required) + assert.Equal(t, "The content of the manifest.", manifestElemSchema.Schema["content"].Description) + assert.NotNil(t, manifestElemSchema.Schema["content"].DiffSuppressFunc) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["tag"].Type) + assert.Equal(t, true, elemSchema.Schema["tag"].Optional) + assert.Equal(t, "The identifier or version to label the pack.", elemSchema.Schema["tag"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["values"].Type) + assert.Equal(t, true, elemSchema.Schema["values"].Optional) + assert.Equal(t, "The values to be used for the pack. 
This is a stringified JSON object.", elemSchema.Schema["values"].Description) + assert.NotNil(t, elemSchema.Schema["values"].DiffSuppressFunc) +} + +func TestClusterLocationSchema(t *testing.T) { + s := ClusterLocationSchema() + + assert.Equal(t, schema.TypeList, s.Type) + assert.Equal(t, true, s.Optional) + + assert.NotNil(t, s.DiffSuppressFunc) + + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["country_code"].Type) + assert.Equal(t, true, elemSchema.Schema["country_code"].Optional) + assert.Equal(t, "", elemSchema.Schema["country_code"].Default) + assert.Equal(t, "The country code of the country the cluster is located in.", elemSchema.Schema["country_code"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["country_name"].Type) + assert.Equal(t, true, elemSchema.Schema["country_name"].Optional) + assert.Equal(t, "", elemSchema.Schema["country_name"].Default) + assert.Equal(t, "The name of the country.", elemSchema.Schema["country_name"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["region_code"].Type) + assert.Equal(t, true, elemSchema.Schema["region_code"].Optional) + assert.Equal(t, "", elemSchema.Schema["region_code"].Default) + assert.Equal(t, "The region code of where the cluster is located in.", elemSchema.Schema["region_code"].Description) + + assert.Equal(t, schema.TypeString, elemSchema.Schema["region_name"].Type) + assert.Equal(t, true, elemSchema.Schema["region_name"].Optional) + assert.Equal(t, "", elemSchema.Schema["region_name"].Default) + assert.Equal(t, "The name of the region.", elemSchema.Schema["region_name"].Description) + + assert.Equal(t, schema.TypeFloat, elemSchema.Schema["latitude"].Type) + assert.Equal(t, true, elemSchema.Schema["latitude"].Required) + assert.Equal(t, "The latitude coordinates value.", elemSchema.Schema["latitude"].Description) + + assert.Equal(t, schema.TypeFloat, 
elemSchema.Schema["longitude"].Type) + assert.Equal(t, true, elemSchema.Schema["longitude"].Required) + assert.Equal(t, "The longitude coordinates value.", elemSchema.Schema["longitude"].Description) +} + +func TestVMVolumeSchema(t *testing.T) { + s := VMVolumeSchema() + + assert.Equal(t, schema.TypeList, s.Type) + assert.Equal(t, true, s.Optional) + + assert.NotNil(t, s.Elem) + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + assert.NotNil(t, elemSchema.Schema["name"]) + assert.Equal(t, schema.TypeString, elemSchema.Schema["name"].Type) + assert.Equal(t, true, elemSchema.Schema["name"].Required) + + assert.NotNil(t, elemSchema.Schema["container_disk"]) + assert.Equal(t, schema.TypeSet, elemSchema.Schema["container_disk"].Type) + assert.Equal(t, true, elemSchema.Schema["container_disk"].Optional) + assert.NotNil(t, elemSchema.Schema["container_disk"].Elem) + containerDiskSchema, ok := elemSchema.Schema["container_disk"].Elem.(*schema.Resource) + assert.True(t, ok) + assert.Equal(t, schema.TypeString, containerDiskSchema.Schema["image_url"].Type) + assert.Equal(t, true, containerDiskSchema.Schema["image_url"].Required) + + assert.NotNil(t, elemSchema.Schema["cloud_init_no_cloud"]) + assert.Equal(t, schema.TypeSet, elemSchema.Schema["cloud_init_no_cloud"].Type) + assert.Equal(t, true, elemSchema.Schema["cloud_init_no_cloud"].Optional) + assert.NotNil(t, elemSchema.Schema["cloud_init_no_cloud"].Elem) + cloudInitDiskSchema, ok := elemSchema.Schema["cloud_init_no_cloud"].Elem.(*schema.Resource) + assert.True(t, ok) + assert.Equal(t, schema.TypeString, cloudInitDiskSchema.Schema["user_data"].Type) + assert.Equal(t, true, cloudInitDiskSchema.Schema["user_data"].Required) + + assert.NotNil(t, elemSchema.Schema["data_volume"]) + assert.Equal(t, schema.TypeSet, elemSchema.Schema["data_volume"].Type) + assert.Equal(t, true, elemSchema.Schema["data_volume"].Optional) + assert.NotNil(t, elemSchema.Schema["data_volume"].Elem) + 
dataVolumeSchema, ok := elemSchema.Schema["data_volume"].Elem.(*schema.Resource) + assert.True(t, ok) + assert.Equal(t, schema.TypeString, dataVolumeSchema.Schema["storage"].Type) + assert.Equal(t, true, dataVolumeSchema.Schema["storage"].Required) +} + +func TestResourceContainerDiskHash(t *testing.T) { + v := map[string]interface{}{ + "image_url": "http://example.com/image", + } + expected := int(hash("http://example.com/image-")) + assert.Equal(t, expected, resourceContainerDiskHash(v)) +} + +func TestResourceCloudInitDiskHash(t *testing.T) { + v := map[string]interface{}{ + "user_data": "user-data-content", + } + expected := int(hash("user-data-content-")) + assert.Equal(t, expected, resourceCloudInitDiskHash(v)) +} + +func TestResourceDataVolumeHash(t *testing.T) { + v := map[string]interface{}{ + "storage": "100GiB", + } + expected := int(hash("100GiB-")) + assert.Equal(t, expected, resourceDataVolumeHash(v)) +} + +func TestVMNicSchema(t *testing.T) { + s := VMNicSchema() + + assert.Equal(t, schema.TypeSet, s.Type) + assert.Equal(t, true, s.Optional) + + assert.NotNil(t, s.Elem) + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + assert.NotNil(t, elemSchema.Schema["nic"]) + assert.Equal(t, schema.TypeList, elemSchema.Schema["nic"].Type) + assert.Equal(t, true, elemSchema.Schema["nic"].Optional) + assert.NotNil(t, elemSchema.Schema["nic"].Elem) + nicElemSchema, ok := elemSchema.Schema["nic"].Elem.(*schema.Resource) + assert.True(t, ok) + + assert.NotNil(t, nicElemSchema.Schema["name"]) + assert.Equal(t, schema.TypeString, nicElemSchema.Schema["name"].Type) + assert.Equal(t, true, nicElemSchema.Schema["name"].Required) + + assert.NotNil(t, nicElemSchema.Schema["multus"]) + assert.Equal(t, schema.TypeList, nicElemSchema.Schema["multus"].Type) + assert.Equal(t, true, nicElemSchema.Schema["multus"].Optional) + assert.Equal(t, 1, nicElemSchema.Schema["multus"].MaxItems) + assert.NotNil(t, 
nicElemSchema.Schema["multus"].Elem) + multusElemSchema, ok := nicElemSchema.Schema["multus"].Elem.(*schema.Resource) + assert.True(t, ok) + + assert.NotNil(t, multusElemSchema.Schema["network_name"]) + assert.Equal(t, schema.TypeString, multusElemSchema.Schema["network_name"].Type) + assert.Equal(t, true, multusElemSchema.Schema["network_name"].Required) + + assert.NotNil(t, multusElemSchema.Schema["default"]) + assert.Equal(t, schema.TypeBool, multusElemSchema.Schema["default"].Type) + assert.Equal(t, true, multusElemSchema.Schema["default"].Optional) + + assert.NotNil(t, nicElemSchema.Schema["network_type"]) + assert.Equal(t, schema.TypeString, nicElemSchema.Schema["network_type"].Type) + assert.Equal(t, true, nicElemSchema.Schema["network_type"].Optional) +} + +func TestVMInterfaceSchema(t *testing.T) { + s := VMInterfaceSchema() + + assert.Equal(t, schema.TypeList, s.Type) + assert.Equal(t, true, s.Required) + + assert.NotNil(t, s.Elem) + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + assert.NotNil(t, elemSchema.Schema["name"]) + assert.Equal(t, schema.TypeString, elemSchema.Schema["name"].Type) + assert.Equal(t, true, elemSchema.Schema["name"].Required) + + assert.NotNil(t, elemSchema.Schema["type"]) + assert.Equal(t, schema.TypeString, elemSchema.Schema["type"].Type) + assert.Equal(t, true, elemSchema.Schema["type"].Optional) + assert.Equal(t, "masquerade", elemSchema.Schema["type"].Default) + + assert.NotNil(t, elemSchema.Schema["model"]) + assert.Equal(t, schema.TypeString, elemSchema.Schema["model"].Type) + assert.Equal(t, true, elemSchema.Schema["model"].Optional) + assert.Equal(t, "virtio", elemSchema.Schema["model"].Default) +} + +func TestVMDiskSchema(t *testing.T) { + s := VMDiskSchema() + + assert.Equal(t, schema.TypeList, s.Type) + assert.Equal(t, true, s.Required) + + assert.NotNil(t, s.Elem) + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + 
assert.NotNil(t, elemSchema.Schema["name"]) + assert.Equal(t, schema.TypeString, elemSchema.Schema["name"].Type) + assert.Equal(t, true, elemSchema.Schema["name"].Required) + + assert.NotNil(t, elemSchema.Schema["bus"]) + assert.Equal(t, schema.TypeString, elemSchema.Schema["bus"].Type) + assert.Equal(t, true, elemSchema.Schema["bus"].Required) +} + +func TestVMDeviceSchema(t *testing.T) { + s := VMDeviceSchema() + + assert.Equal(t, schema.TypeSet, s.Type) + assert.Equal(t, true, s.Optional) + assert.Equal(t, 1, s.MaxItems) + + assert.NotNil(t, s.Elem) + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + // Test 'disk' schema + diskSchema, ok := elemSchema.Schema["disk"] + assert.True(t, ok) + assert.Equal(t, schema.TypeList, diskSchema.Type) + assert.Equal(t, true, diskSchema.Required) + assert.NotNil(t, diskSchema.Elem) + + // Test 'interface' schema + interfaceSchema, ok := elemSchema.Schema["interface"] + assert.True(t, ok) + assert.Equal(t, schema.TypeList, interfaceSchema.Type) + assert.Equal(t, true, interfaceSchema.Required) + assert.NotNil(t, interfaceSchema.Elem) +} + +func TestPackSchema(t *testing.T) { + s := PackSchema() + + assert.Equal(t, schema.TypeList, s.Type) + assert.Equal(t, true, s.Optional) + assert.Equal(t, "For packs of type `spectro`, `helm`, and `manifest`, at least one pack must be specified.", s.Description) + + assert.NotNil(t, s.Elem) + elemSchema, ok := s.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, elemSchema) + + // Test 'uid' schema + uidSchema, ok := elemSchema.Schema["uid"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, uidSchema.Type) + assert.Equal(t, true, uidSchema.Optional) + assert.Equal(t, true, uidSchema.Computed) + assert.Equal(t, "The unique identifier of the pack. The value can be looked up using the [`spectrocloud_pack`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) data source. 
This value is required if the pack type is `spectro` and for `helm` if the chart is from a public helm registry.", uidSchema.Description) + + // Test 'type' schema + typeSchema, ok := elemSchema.Schema["type"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, typeSchema.Type) + assert.Equal(t, true, typeSchema.Optional) + assert.Equal(t, "spectro", typeSchema.Default) + assert.Equal(t, "The type of the pack. Allowed values are `spectro`, `manifest`, `helm`, or `oci`. The default value is spectro. If using an OCI registry for pack, set the type to `oci`.", typeSchema.Description) + + // Test 'name' schema + nameSchema, ok := elemSchema.Schema["name"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, nameSchema.Type) + assert.Equal(t, true, nameSchema.Required) + assert.Equal(t, "The name of the pack. The name must be unique within the cluster profile. ", nameSchema.Description) + + // Test 'registry_uid' schema + registryUIDSchema, ok := elemSchema.Schema["registry_uid"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, registryUIDSchema.Type) + assert.Equal(t, true, registryUIDSchema.Optional) + assert.Equal(t, "The registry UID of the pack. The registry UID is the unique identifier of the registry. This attribute is required if there is more than one registry that contains a pack with the same name. ", registryUIDSchema.Description) + + // Test 'tag' schema + tagSchema, ok := elemSchema.Schema["tag"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, tagSchema.Type) + assert.Equal(t, true, tagSchema.Optional) + assert.Equal(t, "The tag of the pack. The tag is the version of the pack. This attribute is required if the pack type is `spectro` or `helm`. ", tagSchema.Description) + + // Test 'values' schema + valuesSchema, ok := elemSchema.Schema["values"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, valuesSchema.Type) + assert.Equal(t, true, valuesSchema.Optional) + assert.Equal(t, "The values of the pack. 
The values are the configuration values of the pack. The values are specified in YAML format. ", valuesSchema.Description) + + // Test 'manifest' schema + manifestSchema, ok := elemSchema.Schema["manifest"] + assert.True(t, ok) + assert.Equal(t, schema.TypeList, manifestSchema.Type) + assert.Equal(t, true, manifestSchema.Optional) + assert.NotNil(t, manifestSchema.Elem) + manifestElemSchema, ok := manifestSchema.Elem.(*schema.Resource) + assert.True(t, ok) + assert.NotNil(t, manifestElemSchema) + + // Test 'manifest' nested schema + manifestUIDSchema, ok := manifestElemSchema.Schema["uid"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, manifestUIDSchema.Type) + assert.Equal(t, true, manifestUIDSchema.Computed) + + manifestNameSchema, ok := manifestElemSchema.Schema["name"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, manifestNameSchema.Type) + assert.Equal(t, true, manifestNameSchema.Required) + assert.Equal(t, "The name of the manifest. The name must be unique within the pack. ", manifestNameSchema.Description) + + manifestContentSchema, ok := manifestElemSchema.Schema["content"] + assert.True(t, ok) + assert.Equal(t, schema.TypeString, manifestContentSchema.Type) + assert.Equal(t, true, manifestContentSchema.Required) + assert.Equal(t, "The content of the manifest. The content is the YAML content of the manifest. 
", manifestContentSchema.Description) +} diff --git a/spectrocloud/workspace_test.go b/spectrocloud/workspace_test.go new file mode 100644 index 00000000..a0148a64 --- /dev/null +++ b/spectrocloud/workspace_test.go @@ -0,0 +1,271 @@ +package spectrocloud + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/palette-sdk-go/api/models" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestToWorkspacePolicies(t *testing.T) { + // Initialize the resource data with the schema from resourceWorkspace + resourceData := resourceWorkspace().TestResourceData() + _ = resourceData.Set("backup_policy", []interface{}{ + map[string]interface{}{ + "include_all_clusters": true, + "cluster_uids": schema.NewSet(schema.HashString, []interface{}{"cluster-uid-1", "cluster-uid-2"}), + }}) + + policies := toWorkspacePolicies(resourceData) + + assert.NotNil(t, policies) + assert.NotNil(t, policies.BackupPolicy) + assert.Equal(t, true, policies.BackupPolicy.IncludeAllClusters) + assert.Equal(t, []string{"cluster-uid-1", "cluster-uid-2"}, policies.BackupPolicy.ClusterUids) +} + +func TestToWorkspaceBackupPolicy(t *testing.T) { + resourceData := resourceWorkspace().TestResourceData() + _ = resourceData.Set("backup_policy", []interface{}{ + map[string]interface{}{ + "include_all_clusters": true, + "cluster_uids": schema.NewSet(schema.HashString, []interface{}{"cluster-uid-1", "cluster-uid-2"}), + }, + }) + + backupPolicy := toWorkspaceBackupPolicy(resourceData) + + assert.NotNil(t, backupPolicy) + assert.Equal(t, true, backupPolicy.IncludeAllClusters) + assert.Equal(t, []string{"cluster-uid-1", "cluster-uid-2"}, backupPolicy.ClusterUids) +} + +func TestGetExtraFields(t *testing.T) { + resourceData := resourceWorkspace().TestResourceData() + _ = resourceData.Set("backup_policy", []interface{}{ + map[string]interface{}{ + "include_all_clusters": true, + "cluster_uids": schema.NewSet(schema.HashString, []interface{}{"cluster-uid-1", 
"cluster-uid-2"}), + }, + }) + + includeAllClusters, clusterUIDs := getExtraFields(resourceData) + + assert.Equal(t, true, includeAllClusters) + assert.Equal(t, []string{"cluster-uid-1", "cluster-uid-2"}, clusterUIDs) +} + +func TestFlattenWorkspaceClusters(t *testing.T) { + workspace := &models.V1Workspace{ + Spec: &models.V1WorkspaceSpec{ + ClusterRefs: []*models.V1WorkspaceClusterRef{ + {ClusterUID: "cluster-1"}, + {ClusterUID: "cluster-2"}, + }, + }, + } + + result := flattenWorkspaceClusters(workspace) + expected := []interface{}{ + map[string]interface{}{"uid": "cluster-1"}, + map[string]interface{}{"uid": "cluster-2"}, + } + + assert.Equal(t, expected, result) +} + +func TestFlattenWorkspaceClusters_Empty(t *testing.T) { + workspace := &models.V1Workspace{ + Spec: &models.V1WorkspaceSpec{ + ClusterRefs: []*models.V1WorkspaceClusterRef{}, + }, + } + + result := flattenWorkspaceClusters(workspace) + + assert.Equal(t, 0, len(result)) +} + +func TestFlattenWorkspaceBackupPolicy(t *testing.T) { + backup := &models.V1WorkspaceBackup{ + Spec: &models.V1WorkspaceBackupSpec{ + Config: &models.V1WorkspaceBackupConfig{ + BackupConfig: &models.V1ClusterBackupConfig{ + BackupLocationName: "test", + BackupLocationUID: "test-id", + BackupName: "test-back", + BackupPrefix: "test-", + DurationInHours: 1, + IncludeAllDisks: false, + IncludeClusterResources: false, + LocationType: "ss", + Namespaces: []string{"test-ns"}, + Schedule: &models.V1ClusterFeatureSchedule{ + ScheduledRunTime: "0 0 0 * *", + }, + }, + ClusterUids: []string{"cluster-1", "cluster-2"}, + IncludeAllClusters: true, + }, + }, + } + + _ = flattenWorkspaceBackupPolicy(backup) +} + +func TestFlattenWorkspaceClusterNamespaces(t *testing.T) { + items := []*models.V1WorkspaceClusterNamespace{ + { + Name: "namespace-1", + NamespaceResourceAllocation: &models.V1WorkspaceNamespaceResourceAllocation{ + DefaultResourceAllocation: &models.V1WorkspaceResourceAllocation{ + CPUCores: 4.5, + MemoryMiB: 2048.8, + }, + }, + 
Image: &models.V1WorkspaceNamespaceImage{ + BlackListedImages: []string{"image1", "image2"}, + }, + }, + { + Name: "namespace-2", + NamespaceResourceAllocation: &models.V1WorkspaceNamespaceResourceAllocation{ + DefaultResourceAllocation: &models.V1WorkspaceResourceAllocation{ + CPUCores: 2.0, + MemoryMiB: 1024.0, + }, + }, + }, + } + + result := flattenWorkspaceClusterNamespaces(items) + + assert.Equal(t, 2, len(result)) + + ns1 := result[0].(map[string]interface{}) + assert.Equal(t, "namespace-1", ns1["name"]) + assert.Equal(t, "5", ns1["resource_allocation"].(map[string]interface{})["cpu_cores"]) + assert.Equal(t, "2049", ns1["resource_allocation"].(map[string]interface{})["memory_MiB"]) + assert.Equal(t, []string{"image1", "image2"}, ns1["images_blacklist"]) + + ns2 := result[1].(map[string]interface{}) + assert.Equal(t, "namespace-2", ns2["name"]) + assert.Equal(t, "2", ns2["resource_allocation"].(map[string]interface{})["cpu_cores"]) + assert.Equal(t, "1024", ns2["resource_allocation"].(map[string]interface{})["memory_MiB"]) + assert.Nil(t, ns2["images_blacklist"]) +} + +func TestFlattenWorkspaceClusterNamespaces_EmptyList(t *testing.T) { + items := []*models.V1WorkspaceClusterNamespace{} + result := flattenWorkspaceClusterNamespaces(items) + assert.Equal(t, 0, len(result)) +} + +func TestFlattenWorkspaceClusterNamespaces_NilImage(t *testing.T) { + items := []*models.V1WorkspaceClusterNamespace{ + { + Name: "namespace-3", + NamespaceResourceAllocation: &models.V1WorkspaceNamespaceResourceAllocation{ + DefaultResourceAllocation: &models.V1WorkspaceResourceAllocation{ + CPUCores: 8.0, + MemoryMiB: 4096.0, + }, + }, + }, + } + + result := flattenWorkspaceClusterNamespaces(items) + + assert.Equal(t, 1, len(result)) + + ns := result[0].(map[string]interface{}) + assert.Equal(t, "namespace-3", ns["name"]) + assert.Equal(t, "8", ns["resource_allocation"].(map[string]interface{})["cpu_cores"]) + assert.Equal(t, "4096", 
ns["resource_allocation"].(map[string]interface{})["memory_MiB"]) + assert.Nil(t, ns["images_blacklist"]) +} + +func TestToWorkspaceNamespace(t *testing.T) { + clusterRbacBinding := map[string]interface{}{ + "name": "namespace-1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "4.5", + "memory_MiB": "2048.8", + }, + "images_blacklist": []interface{}{"image1", "image2"}, + } + + result := toWorkspaceNamespace(clusterRbacBinding) + + assert.NotNil(t, result) + assert.Equal(t, "namespace-1", result.Name) + assert.Equal(t, 4.5, result.NamespaceResourceAllocation.DefaultResourceAllocation.CPUCores) + assert.Equal(t, 2048.8, result.NamespaceResourceAllocation.DefaultResourceAllocation.MemoryMiB) + assert.Equal(t, []string{"image1", "image2"}, result.Image.BlackListedImages) + +} + +func TestToWorkspaceNamespace_InvalidCPU(t *testing.T) { + clusterRbacBinding := map[string]interface{}{ + "name": "namespace-1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "invalid", + "memory_MiB": "2048.8", + }, + "images_blacklist": []interface{}{"image1", "image2"}, + } + + result := toWorkspaceNamespace(clusterRbacBinding) + + assert.Nil(t, result) +} + +func TestToWorkspaceNamespace_InvalidMemory(t *testing.T) { + clusterRbacBinding := map[string]interface{}{ + "name": "namespace-1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "4.5", + "memory_MiB": "invalid", + }, + "images_blacklist": []interface{}{"image1", "image2"}, + } + + result := toWorkspaceNamespace(clusterRbacBinding) + + assert.Nil(t, result) +} + +func TestToWorkspaceNamespace_NoBlacklist(t *testing.T) { + clusterRbacBinding := map[string]interface{}{ + "name": "namespace-1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "4.5", + "memory_MiB": "2048.8", + }, + } + + result := toWorkspaceNamespace(clusterRbacBinding) + + assert.NotNil(t, result) + assert.Equal(t, "namespace-1", result.Name) + assert.Equal(t, 4.5, 
result.NamespaceResourceAllocation.DefaultResourceAllocation.CPUCores) + assert.Equal(t, 2048.8, result.NamespaceResourceAllocation.DefaultResourceAllocation.MemoryMiB) + +} + +func TestToWorkspaceNamespace_InvalidRegex(t *testing.T) { + clusterRbacBinding := map[string]interface{}{ + "name": "/namespace-1", + "resource_allocation": map[string]interface{}{ + "cpu_cores": "4.5", + "memory_MiB": "2048.8", + }, + "images_blacklist": []interface{}{"image1", "image2"}, + } + + result := toWorkspaceNamespace(clusterRbacBinding) + + assert.NotNil(t, result) + assert.Equal(t, "/namespace-1", result.Name) + +} diff --git a/tests/mockApiServer/apiServerMock.go b/tests/mockApiServer/apiServerMock.go new file mode 100644 index 00000000..7456947d --- /dev/null +++ b/tests/mockApiServer/apiServerMock.go @@ -0,0 +1,131 @@ +package main + +import ( + "encoding/json" + "github.com/gorilla/mux" + "github.com/spectrocloud/terraform-provider-spectrocloud/tests/mockApiServer/routes" + "log" + "net/http" +) + +// API key for authentication +const apiKey = "12345" + +// Aggregate all routes into slices for different servers +var allRoutesPositive []routes.Route +var allRoutesNegative []routes.Route + +// Middleware to check for the API key and log the Project-ID if present +func apiKeyAuthMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("ApiKey") != apiKey { + http.Error(w, "Forbidden", http.StatusForbidden) + return + } + // Log the Project-ID if it is present + if projectID := r.Header.Get("Project-ID"); projectID != "" { + log.Printf("Project-ID: %s", projectID) + } + next.ServeHTTP(w, r) + }) +} + +func main() { + // Create routers for different ports + router8080 := mux.NewRouter() + router8888 := mux.NewRouter() + + // Set up routes for port 8080 + setupRoutes(router8080, allRoutesPositive) + + // Set up routes for port 8888 + setupRoutes(router8888, allRoutesNegative) + + // Start servers on 
different ports + go func() { + log.Println("Starting server on :8080...") + if err := http.ListenAndServeTLS(":8080", "mock_server.crt", "mock_server.key", router8080); err != nil { + log.Fatalf("Server failed to start on port 8080: %v", err) + } + }() + + log.Println("Starting server on :8888...") + + if err := http.ListenAndServeTLS(":8888", "mock_server.crt", "mock_server.key", router8888); err != nil { + log.Fatalf("Server failed to start on port 8888: %v", err) + } +} + +// setupRoutes configures the given router with the provided routes +func setupRoutes(router *mux.Router, routes []routes.Route) { + // Apply API key middleware to all routes + router.Use(apiKeyAuthMiddleware) + + // Register all routes + for _, route := range routes { + route := route // capture the range variable + + router.HandleFunc(route.Path, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(route.Response.StatusCode) + if route.Response.Payload != nil { + err := json.NewEncoder(w).Encode(route.Response.Payload) + if err != nil { + return + } + } + }).Methods(route.Method) + } +} + +func aggregateRoutes(routeFuncs ...func() []routes.Route) []routes.Route { + var aggregatedRoutes []routes.Route + for _, routeFunc := range routeFuncs { + aggregatedRoutes = append(aggregatedRoutes, routeFunc()...)
+ } + return aggregatedRoutes +} + +func init() { + // Initialize routes for port 8080 + allRoutesPositive = aggregateRoutes( + routes.CommonProjectRoutes, + routes.ProjectRoutes, + routes.AppliancesRoutes, + routes.UserRoutes, + routes.FilterRoutes, + routes.RolesRoutes, + routes.RegistriesRoutes, + routes.PacksRoutes, + routes.ClusterProfileRoutes, + routes.CloudAccountsRoutes, + routes.ClusterCommonRoutes, + routes.ClusterRoutes, + routes.AppProfilesRoutes, + routes.TeamRoutes, + routes.ApplicationRoutes, + routes.BackupRoutes, + routes.IPPoolRoutes, + routes.MacrosRoutes, + routes.TenantRoutes, + routes.WorkSpaceRoutes, + routes.AlertRoutes, + routes.ClusterGroupRoutes, + ) + // Initialize routes for port 8888 + allRoutesNegative = aggregateRoutes( + routes.CommonProjectRoutes, + routes.ProjectNegativeRoutes, + routes.AppliancesNegativeRoutes, + routes.UserNegativeRoutes, + routes.FilterNegativeRoutes, + routes.RolesNegativeRoutes, + routes.RegistriesNegativeRoutes, + routes.PacksNegativeRoutes, + routes.ClusterProfileNegativeRoutes, + routes.CloudAccountsNegativeRoutes, + routes.ClusterCommonNegativeRoutes, + routes.MacrosNegativeRoutes, + routes.TenantNegativeRoutes, + ) +} diff --git a/tests/mockApiServer/routes/common.go b/tests/mockApiServer/routes/common.go new file mode 100644 index 00000000..a824a568 --- /dev/null +++ b/tests/mockApiServer/routes/common.go @@ -0,0 +1,54 @@ +package routes + +import ( + "crypto/rand" + "encoding/hex" + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" +) + +// ResponseData defines the structure of mock responses +type ResponseData struct { + StatusCode int + Payload interface{} +} + +// Route defines a mock route with method, path, and response +type Route struct { + Method string + Path string + Response ResponseData +} + +func generateRandomStringUID() string { + bytes := make([]byte, 24/2) + _, err := rand.Read(bytes) + if err != nil { + return "test" + } + return hex.EncodeToString(bytes) +} + +func 
CommonProjectRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/health", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: map[string]interface{}{ + "healthy": true, + }, + }, + }, + } +} + +func getError(code string, msg string) models.V1Error { + return models.V1Error{ + Code: code, + Details: nil, + Message: msg, + Ref: "ref-" + generateRandomStringUID(), + } +} diff --git a/tests/mockApiServer/routes/mockAlert.go b/tests/mockApiServer/routes/mockAlert.go new file mode 100644 index 00000000..b38c2a5a --- /dev/null +++ b/tests/mockApiServer/routes/mockAlert.go @@ -0,0 +1,60 @@ +package routes + +import "github.com/spectrocloud/palette-sdk-go/api/models" + +func AlertRoutes() []Route { + return []Route{ + { + Method: "PUT", + Path: "/v1/projects/{uid}/alerts/{component}/{alertUid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/projects/{uid}/alerts/{component}/{alertUid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "POST", + Path: "/v1/projects/{uid}/alerts/{component}", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-alert-1"}, + }, + }, + { + Method: "GET", + Path: "/v1/projects/{uid}/alerts/{component}/{alertUid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1Channel{ + AlertAllUsers: false, + CreatedBy: "test-user", + HTTP: &models.V1ChannelHTTP{ + Body: "test body", + Headers: map[string]string{ + "test": "test", + }, + Method: "PUT", + URL: "test.com", + }, + Identifiers: []string{"test1"}, + IsActive: false, + Status: &models.V1AlertNotificationStatus{ + IsSucceeded: false, + Message: "test message", + Time: models.V1Time{}, + }, + Type: "test-type", + UID: "test-uid", + }, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockAppProfiles.go b/tests/mockApiServer/routes/mockAppProfiles.go new file mode 100644 index 00000000..4b8ce7bf --- /dev/null +++ 
b/tests/mockApiServer/routes/mockAppProfiles.go @@ -0,0 +1,121 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/client/v1" + "github.com/spectrocloud/palette-sdk-go/api/models" +) + +func AppProfilesRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/appProfiles", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-app-profile-test"}, + }, + }, + { + Method: "GET", + Path: "/v1/appProfiles/{uid}/tiers", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1AppProfileTiers{ + Metadata: &models.V1ObjectMeta{ + Name: "test-tier-1", + UID: "test-uid", + }, + Spec: &models.V1AppProfileTiersSpec{ + AppTiers: []*models.V1AppTier{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-tier-0", + UID: "test-0-uid", + }, + Spec: &models.V1AppTierSpec{ + ContainerRegistryUID: "test", + InstallOrder: 0, + Manifests: []*models.V1ObjectReference{ + { + Kind: "cluster", + Name: "test-manifest", + UID: "test-manifest-uid", + }, + }, + Properties: nil, + RegistryUID: "test-reg-uid", + SourceAppTierUID: "test-source", + Type: "manifest", + Values: "test-values", + Version: "1.0.0", + }, + }, + }, + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/appProfiles/{uid}/tiers/{tierUid}/manifests/{manifestUid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1Manifest{ + Metadata: &models.V1ObjectMeta{ + Name: "test-manifest", + UID: "test-manifest-uid", + }, + Spec: &models.V1ManifestPublishedSpec{ + Published: &models.V1ManifestData{ + Content: "test-manifest-content", + Digest: "test-digest", + }, + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/appProfiles/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: models.V1AppProfile{ + Metadata: &models.V1ObjectMeta{ + Name: "test-app-profile", + UID: "test-app-profile-id", + }, + Spec: &models.V1AppProfileSpec{ + ParentUID: "test-parent-id", + Template: &models.V1AppProfileTemplate{ + AppTiers: 
[]*models.V1AppTierRef{ + { + Name: "test-tier-1", + Type: "manifest", + UID: "tes-uid", + Version: "1.0.0", + }, + }, + RegistryRefs: nil, + }, + Version: "1.0.0", + Versions: []*models.V1AppProfileVersion{ + { + UID: "v1-id", + Version: "1.0.0", + }, + }, + }, + Status: nil, + }, + }, + }, + { + Method: "DELETE", + Path: "/v1/appProfiles/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: &v1.V1AppProfilesUIDDeleteNoContent{}, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockAppliances.go b/tests/mockApiServer/routes/mockAppliances.go new file mode 100644 index 00000000..bcf6ab6d --- /dev/null +++ b/tests/mockApiServer/routes/mockAppliances.go @@ -0,0 +1,222 @@ +package routes + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" + "strconv" +) + +func getEdgeHostSearchSummary() models.V1EdgeHostsSearchSummary { + var items []*models.V1EdgeHostsMetadata + var profileSummary []*models.V1ProfileTemplateSummary + profileSummary = append(profileSummary, &models.V1ProfileTemplateSummary{ + CloudType: "aws", + Name: "test-profile-1", + Packs: []*models.V1PackRefSummary{{ + AddonType: "", + Annotations: nil, + DisplayName: "k8", + Layer: "infra", + LogoURL: "", + Name: "kubernetes_pack", + PackUID: generateRandomStringUID(), + Tag: "", + Type: "", + Version: "1.28.0", + }}, + Type: "cluster", + UID: generateRandomStringUID(), + Version: "1.0", + }) + items = append(items, &models.V1EdgeHostsMetadata{ + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-edge-01", + UID: generateRandomStringUID(), + }, + Spec: &models.V1EdgeHostsMetadataSpec{ + ClusterProfileTemplates: profileSummary, + Device: &models.V1DeviceSpec{ + ArchType: ptr.StringPtr("AMD"), + CPU: &models.V1CPU{ + Cores: 2, + }, + Disks: []*models.V1Disk{{ + Controller: "", + 
Partitions: nil, + Size: 50, + Vendor: "", + }}, + Gpus: []*models.V1GPUDeviceSpec{ + { + Addresses: map[string]string{ + "test": "121.0.0.1", + }, + Model: "xyz", + Vendor: "abc", + }, + }, + Memory: nil, + Nics: nil, + Os: nil, + }, + Host: &models.V1EdgeHostSpecHost{ + HostAddress: "192.168.1.100", + MacAddress: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + ProjectMeta: nil, + Type: "", + }, + Status: &models.V1EdgeHostsMetadataStatus{ + Health: &models.V1EdgeHostHealth{ + AgentVersion: "", + Message: "", + State: "healthy", + }, + InUseClusters: nil, + State: "", + }, + }) + return models.V1EdgeHostsSearchSummary{ + Items: items, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 1, + Limit: 50, + Offset: 0, + }, + } +} + +func getEdgeHostPayload() models.V1EdgeHostDevice { + return models.V1EdgeHostDevice{ + Aclmeta: &models.V1ACLMeta{ + OwnerUID: generateRandomStringUID(), + ProjectUID: generateRandomStringUID(), + TenantUID: generateRandomStringUID(), + }, + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: map[string]string{"type": "test"}, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-edge-01", + UID: generateRandomStringUID(), + }, + Spec: &models.V1EdgeHostDeviceSpec{ + CloudProperties: nil, + ClusterProfileTemplates: nil, + Device: &models.V1DeviceSpec{ + ArchType: ptr.StringPtr("amd64"), + CPU: nil, + Disks: nil, + Gpus: nil, + Memory: nil, + Nics: nil, + Os: nil, + }, + Host: nil, + Properties: nil, + Service: nil, + Type: "", + Version: "1.0", + }, + Status: &models.V1EdgeHostDeviceStatus{ + Health: &models.V1EdgeHostHealth{ + AgentVersion: "", + Message: "", + State: "healthy", + }, + InUseClusters: nil, + Packs: nil, + ProfileStatus: nil, + ServiceAuthToken: "", + State: "ready", + }, + } +} + +//func creatEdgeHostErrorResponse() interface{} { +// var payload interface{} +// payload = map[string]interface{}{ +// "UID": 
ptr.StringPtr("test-edge-host-id"), +// } +// return map[string]interface{}{ +// "AuditUID": generateRandomStringUID(), +// "Payload": payload, +// } +//} + +func AppliancesRoutes() []Route { + return []Route{ + { + Method: "DELETE", + Path: "/v1/edgehosts/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{ + "err": "test_error", + }, + }, + }, + { + Method: "POST", + Path: "/v1/dashboard/edgehosts/search", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getEdgeHostSearchSummary(), + }, + }, + { + Method: "GET", + Path: "/v1/edgehosts/{uid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getEdgeHostPayload(), + }, + }, + } +} + +func AppliancesNegativeRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/edgehosts", + Response: ResponseData{ + StatusCode: http.StatusLocked, + Payload: getError(strconv.Itoa(http.StatusNotFound), "Operation not allowed"), + }, + }, + { + Method: "POST", + Path: "/v1/dashboard/edgehosts/search", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "No edge host found"), + }, + }, + { + Method: "GET", + Path: "/v1/edgehosts/{uid}", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "No edge host found"), + }, + }, + { + Method: "DELETE", + Path: "/v1/edgehosts/{uid}", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "No edge host found"), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockApplication.go b/tests/mockApiServer/routes/mockApplication.go new file mode 100644 index 00000000..d616572e --- /dev/null +++ b/tests/mockApiServer/routes/mockApplication.go @@ -0,0 +1,5 @@ +package routes + +func ApplicationRoutes() []Route { + return []Route{} +} diff --git a/tests/mockApiServer/routes/mockBackup.go b/tests/mockApiServer/routes/mockBackup.go new 
file mode 100644 index 00000000..dcd5dd45 --- /dev/null +++ b/tests/mockApiServer/routes/mockBackup.go @@ -0,0 +1,98 @@ +package routes + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" +) + +func BackupRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/users/assets/locations/s3", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-backup-location-id"}, + }, + }, + { + Method: "DELETE", + Path: "/v1/users/assets/locations/s3/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "PUT", + Path: "/v1/users/assets/locations/s3/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/users/assets/locations/s3/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1UserAssetsLocationS3{ + Metadata: &models.V1ObjectMetaInputEntity{ + Annotations: nil, + Labels: nil, + Name: "test-backup-location", + }, + Spec: &models.V1UserAssetsLocationS3Spec{ + Config: &models.V1S3StorageConfig{ + BucketName: ptr.StringPtr("test-bucket"), + CaCert: "test-cert", + Credentials: &models.V1AwsCloudAccount{ + AccessKey: "test-access-key", + CredentialType: "secret", + Partition: nil, + PolicyARNs: []string{"test-arn"}, + SecretKey: "test-secret-key", + Sts: nil, + }, + Region: ptr.StringPtr("test-east"), + S3ForcePathStyle: ptr.BoolPtr(false), + S3URL: "s3://test/test", + UseRestic: nil, + }, + IsDefault: false, + Type: "", + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/users/assets/locations", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1UserAssetsLocations{ + Items: []*models.V1UserAssetsLocation{ + { + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + Labels: nil, + Name: "test-bsl-location", + UID: "test-bsl-location-id", + }, + Spec: &models.V1UserAssetsLocationSpec{ + IsDefault: false, + Storage: "s3", + Type: "", + }, + }, + }, + 
}, + }, + }, + { + Method: "", + Path: "", + Response: ResponseData{}, + }, + } +} diff --git a/tests/mockApiServer/routes/mockCloudAccounts.go b/tests/mockApiServer/routes/mockCloudAccounts.go new file mode 100644 index 00000000..005bb34d --- /dev/null +++ b/tests/mockApiServer/routes/mockCloudAccounts.go @@ -0,0 +1,1077 @@ +package routes + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" +) + +func getAccountResponse(cloud string) interface{} { + switch cloud { + case "aws": + return &models.V1AwsAccounts{ + Items: []*models.V1AwsAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-aws-account-1", + UID: "test-aws-account-id-1", + }, + Spec: &models.V1AwsCloudAccount{ + AccessKey: "test-access-key", + CredentialType: "secret", + Partition: nil, + PolicyARNs: nil, + SecretKey: "test-crt", + Sts: &models.V1AwsStsCredentials{ + Arn: "test-arn", + ExternalID: "test-ex-id", + }, + }, + Status: &models.V1CloudAccountStatus{State: "active"}, + }, + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-aws-account-2", + UID: generateRandomStringUID(), + }, + Spec: &models.V1AwsCloudAccount{ + AccessKey: "test-access-key", + CredentialType: "secret", + Partition: nil, + PolicyARNs: nil, + SecretKey: "test-crt", + Sts: &models.V1AwsStsCredentials{ + Arn: "test-arn", + ExternalID: "test-ex-id", + }, + }, + Status: &models.V1CloudAccountStatus{State: "active"}, + }, + }, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 2, + Limit: 10, + Offset: 0, + }, + } + case "azure": + return &models.V1AzureAccounts{ + Items: []*models.V1AzureAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-azure-account-1", + UID: "test-azure-account-id-1", + }, + Spec: &models.V1AzureCloudAccount{ + AzureEnvironment: ptr.StringPtr("test-env"), + ClientID: ptr.StringPtr("test-client-id"), + ClientSecret: 
ptr.StringPtr("test-secret"), + Settings: nil, + TenantID: ptr.StringPtr("tenant-id"), + TenantName: "test", + }, + Status: nil, + }, + }, + Listmeta: nil, + } + case "tke": + return &models.V1TencentAccounts{ + Items: []*models.V1TencentAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-tke-account-1", + UID: "test-tke-account-id-1", + }, + Spec: &models.V1TencentCloudAccount{ + SecretID: ptr.StringPtr("test-secretID"), + SecretKey: ptr.StringPtr("test-secretKey"), + }, + Status: &models.V1CloudAccountStatus{ + State: "active", + }, + }, + }, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 2, + Limit: 10, + Offset: 0, + }, + } + case "gcp": + return &models.V1GcpAccounts{ + Items: []*models.V1GcpAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-gcp-account-1", + UID: "test-gcp-account-id-1", + }, + Spec: nil, + Status: nil, + }, + }, + Listmeta: nil, + } + case "vsphere": + return &models.V1VsphereAccounts{ + Items: []*models.V1VsphereAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-vsphere-account-1", + UID: "test-vsphere-account-id-1", + }, + }, + }, + Listmeta: nil, + } + case "openstack": + return &models.V1OpenStackAccounts{ + Items: []*models.V1OpenStackAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-openstack-account-1", + UID: "test-openstack-account-id-1", + }, + }, + }, + Listmeta: nil, + } + case "maas": + return &models.V1MaasAccounts{ + Items: []*models.V1MaasAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-maas-account-1", + UID: "test-maas-account-id-1", + }, + Spec: &models.V1MaasCloudAccount{ + APIEndpoint: 
ptr.StringPtr("test.end.com"), + APIKey: ptr.StringPtr("testApiKey"), + PreferredSubnets: []string{"subnet1"}, + }, + }, + }, + Listmeta: nil, + } + case "custom": + return &models.V1CustomAccounts{ + Items: []*models.V1CustomAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-custom-account-1", + UID: "test-custom-account-id-1", + }, + }, + }, + Listmeta: nil, + } + } + return nil +} + +func getAccountNegativeResponse(cloud string) interface{} { + switch cloud { + case "aws": + return &models.V1AwsAccounts{ + Items: []*models.V1AwsAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-aws-account-2", + UID: generateRandomStringUID(), + }, + Spec: &models.V1AwsCloudAccount{ + AccessKey: "test-access-key", + CredentialType: "secret", + Partition: nil, + PolicyARNs: nil, + SecretKey: "test-crt", + Sts: &models.V1AwsStsCredentials{ + Arn: "test-arn", + ExternalID: "test-ex-id", + }, + }, + Status: &models.V1CloudAccountStatus{State: "active"}, + }, + }, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 2, + Limit: 10, + Offset: 0, + }, + } + case "azure": + return &models.V1AzureAccounts{ + Items: []*models.V1AzureAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-azure-account-1-neg", + UID: "test-azure-account-id-1-neg", + }, + Spec: &models.V1AzureCloudAccount{ + AzureEnvironment: ptr.StringPtr("test-env"), + ClientID: ptr.StringPtr("test-client-id"), + ClientSecret: ptr.StringPtr("test-secret"), + Settings: nil, + TenantID: ptr.StringPtr("tenant-id"), + TenantName: "test", + }, + Status: nil, + }, + }, + Listmeta: nil, + } + case "tke": + return &models.V1TencentAccounts{ + Items: []*models.V1TencentAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test--1", + UID: "test-id-1", + }, + 
Spec: &models.V1TencentCloudAccount{ + SecretID: ptr.StringPtr("test-secretID"), + SecretKey: ptr.StringPtr("test-secretKey"), + }, + Status: &models.V1CloudAccountStatus{ + State: "notActive", + }, + }, + }, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 2, + Limit: 10, + Offset: 0, + }, + } + case "gcp": + return &models.V1GcpAccounts{ + Items: []*models.V1GcpAccount{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-gcp-1-neg", + UID: "test-account-gcp-id-1-neg", + }, + Spec: nil, + Status: nil, + }, + }, + Listmeta: nil, + } + case "vsphere": + return &models.V1VsphereAccounts{ + Items: []*models.V1VsphereAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-vsphere-account-1-neg", + UID: "test-vsphere-account-id-1-neg", + }, + }, + }, + Listmeta: nil, + } + case "openstack": + return &models.V1OpenStackAccounts{ + Items: []*models.V1OpenStackAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-openstack-account-1-neg", + UID: "test-openstack-account-uid-1-neg", + }, + }, + }, + Listmeta: nil, + } + case "maas": + return &models.V1MaasAccounts{ + Items: []*models.V1MaasAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-maas-account-1-neg", + UID: "test-maas-account-id-1-neg", + }, + Spec: &models.V1MaasCloudAccount{ + APIEndpoint: ptr.StringPtr("test.end.com"), + APIKey: ptr.StringPtr("testApiKey"), + PreferredSubnets: []string{"subnet1"}, + }, + }, + }, + Listmeta: nil, + } + case "custom": + return &models.V1CustomAccounts{ + Items: []*models.V1CustomAccount{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-custom-account-1-neg", + UID: "test-custom-account-id-1-neg", + }, + }, + }, + Listmeta: nil, + } + } + return nil +} + +func CloudAccountsRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/cloudaccounts/summary", 
+ Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1CloudAccountsSummary{ + Items: []*models.V1CloudAccountSummary{ + { + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: map[string]string{"scope": "project"}, + Name: "test-import-account", + UID: "test-import-acc-id", + }, + SpecSummary: &models.V1CloudAccountSummarySpecSummary{ + AccountID: "test-import-acc-id", + }, + Status: &models.V1CloudAccountStatus{ + State: "Active", + }, + }, + }, + Listmeta: nil, + }, + }, + }, + + // gcp + { + Method: "POST", + Path: "/v1/cloudaccounts/gcp", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-gcp-account-id-1"}, + }, + }, + { + Method: "POST", + Path: "/v1/clouds/gcp/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/gcp/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/gcp/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/gcp/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1GcpAccount{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-gcp-account-1", + UID: "test-gcp-account-id-1", + }, + Spec: &models.V1GcpAccountSpec{ + JSONCredentials: "test-json-cred", + JSONCredentialsFileName: "test-json", + }, + Status: &models.V1CloudAccountStatus{ + State: "Running", + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/overlords/gcp/{uid}/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "GET", + Path: 
"/v1/cloudaccounts/gcp", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("gcp"), + }, + }, + + // Maas + { + Method: "POST", + Path: "/v1/cloudaccounts/maas", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-maas-account-1"}, + }, + }, + { + Method: "POST", + Path: "/v1/clouds/maas/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/maas/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/maas/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/maas", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("maas"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/maas/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1MaasAccount{ + Metadata: &models.V1ObjectMeta{ + Name: "test-maas-account-1", + UID: "test-maas-account-id-1", + Annotations: map[string]string{"overlordUid": "test-pcg-id"}, + }, + Spec: &models.V1MaasCloudAccount{ + APIEndpoint: ptr.StringPtr("test.end.com"), + APIKey: ptr.StringPtr("testApiKey"), + PreferredSubnets: []string{"subnet1"}, + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/overlords/maas/{uid}/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + + // azure + { + Method: "POST", + Path: "/v1/cloudaccounts/azure", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-azure-account-id-1"}, // fixed copy-paste: was the AWS account UID; now matches the Azure GET routes below + }, + }, + { + Method: "POST", + Path: "/v1/clouds/azure/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: 
"/v1/cloudaccounts/azure/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/azure/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/azure/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1AzureAccount{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: map[string]string{"scope": "project", "overlordUid": ""}, + Labels: nil, + Name: "test-azure-account-1", + UID: "test-azure-account-id-1", + }, + Spec: &models.V1AzureCloudAccount{ + AzureEnvironment: ptr.StringPtr("test-env"), + ClientID: ptr.StringPtr("test-client-id"), + ClientSecret: ptr.StringPtr("test-secret"), + Settings: &models.V1CloudAccountSettings{ + DisablePropertiesRequest: false, + }, + TenantID: ptr.StringPtr("tenant-id"), + TenantName: "test", + }, + Status: nil, + }, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/azure", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("azure"), + }, + }, + + // aws + { + Method: "GET", + Path: "/v1/cloudaccounts/aws", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("aws"), + }, + }, + { + Method: "POST", + Path: "/v1/cloudaccounts/aws", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-aws-account-1"}, + }, + }, + { + Method: "POST", + Path: "/v1/clouds/aws/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/aws/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/aws/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/aws/{uid}", + Response: ResponseData{ + StatusCode: 200, + 
Payload: &models.V1AwsAccount{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-aws-account-1", + UID: "test-aws-account-id-1", + }, + Spec: &models.V1AwsCloudAccount{ + AccessKey: "test-access-key", + CredentialType: "secret", + Partition: nil, + PolicyARNs: nil, + SecretKey: "test-crt", + Sts: &models.V1AwsStsCredentials{ + Arn: "test-arn", + ExternalID: "test-ex-id", + }, + }, + Status: &models.V1CloudAccountStatus{State: "active"}, + }, + }, + }, + + // tke + { + Method: "GET", + Path: "/v1/cloudaccounts/tencent", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("tke"), + }, + }, + { + Method: "POST", + Path: "/v1/cloudaccounts/tencent", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-tke-account-id-1"}, + }, + }, + { + Method: "POST", + Path: "/v1/clouds/tencent/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/tencent/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/tencent/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/tencent/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1TencentAccount{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-tke-account-1", + UID: "test-tke-account-id-1", + }, + Spec: &models.V1TencentCloudAccount{ + SecretID: ptr.StringPtr("test-secretID"), + SecretKey: ptr.StringPtr("test-secretKey"), + }, + Status: &models.V1CloudAccountStatus{ + State: "active", + }, + }, + }, + }, + { + Method: "POST", + Path: 
"/v1/overlords/tencent/{uid}/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + + // vsphere + { + Method: "GET", + Path: "/v1/cloudaccounts/vsphere", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("vsphere"), + }, + }, + { + Method: "POST", + Path: "/v1/cloudaccounts/vsphere", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-vsphere-account-id-1"}, + }, + }, + { + Method: "POST", + Path: "/v1/clouds/vsphere/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/vsphere/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/vsphere/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/vsphere/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1VsphereAccount{ + + Metadata: &models.V1ObjectMeta{ + Name: "test-vsphere-account-1", + UID: "test-vsphere-account-id-1", + }, + Spec: &models.V1VsphereCloudAccount{ + Insecure: false, + Password: ptr.StringPtr("test-pwd"), + Username: ptr.StringPtr("test-uname"), + VcenterServer: ptr.StringPtr("test-uname.com"), + }, + Status: &models.V1CloudAccountStatus{ + State: "Running", + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/overlords/vsphere/{uid}/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + + // openstack + { + Method: "GET", + Path: "/v1/cloudaccounts/openstack", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("openstack"), + }, + }, + { + Method: "POST", + Path: "/v1/cloudaccounts/openstack", + Response: ResponseData{ + StatusCode: 
201, + Payload: map[string]string{"UID": "test-openstack-account-id-1"}, + }, + }, + { + Method: "POST", + Path: "/v1/clouds/openstack/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/openstack/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/openstack/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/openstack/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1OpenStackAccount{ + Metadata: &models.V1ObjectMeta{ + Name: "test-openstack-account-1", + UID: "test-openstack-account-id-1", + }, + Spec: &models.V1OpenStackCloudAccount{ + CaCert: "testcert", + DefaultDomain: "test.com", + DefaultProject: "Default", + IdentityEndpoint: ptr.StringPtr("testtest"), + Insecure: false, + ParentRegion: "test-region", + Password: ptr.StringPtr("test-pwd"), + Username: ptr.StringPtr("test-uname"), + }, + Status: &models.V1CloudAccountStatus{ + State: "Running", + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/overlords/openstack/{uid}/account/validate", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + + { + Method: "GET", + Path: "/v1/cloudaccounts/cloudTypes/{cloudType}", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountResponse("custom"), + }, + }, + { + Method: "POST", + Path: "/v1/cloudaccounts/cloudTypes/{cloudType}", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "mock-uid"}, + }, + }, + { + Method: "GET", + Path: "/v1/clouds/cloudTypes", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1CustomCloudTypes{ + CloudTypes: []*models.V1CustomCloudType{ + { + CloudCategory: "test", + CloudFamily: "", + DisplayName: 
"test-cloud", + IsCustom: true, + IsManaged: false, + IsVertex: false, + Logo: "", + Name: "test-cloud", + }, + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/overlords/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1Overlord{ + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "pcg-1", + UID: "pcg-1-id", + }, + Spec: &models.V1OverloadSpec{ + CloudAccountUID: "test-acc-id", + IPAddress: "121.0.0.1", + IPPools: nil, + IsSelfHosted: false, + IsSystem: false, + SpectroClusterUID: "test-spectro-id", + TenantUID: "test-tenant-id", + }, + Status: &models.V1OverloadStatus{ + Health: nil, + IsActive: false, + IsReady: false, + KubectlCommands: nil, + Notifications: nil, + State: "Running", + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/cloudTypes/{cloudType}/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1CustomAccount{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-name", + UID: "test-uid", + }, + Spec: &models.V1CustomCloudAccount{ + Credentials: map[string]string{ + "username": "test", + "password": "test", + }, + }, + Status: &models.V1CloudAccountStatus{ + State: "Active", + }, + }, + }, + }, + { + Method: "PUT", + Path: "/v1/cloudaccounts/cloudTypes/{cloudType}/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/cloudaccounts/cloudTypes/{cloudType}/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + } +} + +func CloudAccountsNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/cloudaccounts/gcp", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("gcp"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/azure", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("azure"), + }, + }, + + { + Method: "GET", + Path: "/v1/cloudaccounts/aws", + Response: ResponseData{ + 
StatusCode: 200, + Payload: getAccountNegativeResponse("aws"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/tencent", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("tke"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/vsphere", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("vsphere"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/openstack", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("openstack"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/maas", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("maas"), + }, + }, + { + Method: "GET", + Path: "/v1/cloudaccounts/cloudTypes/{cloudType}", + Response: ResponseData{ + StatusCode: 200, + Payload: getAccountNegativeResponse("custom"), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockCluster.go b/tests/mockApiServer/routes/mockCluster.go new file mode 100644 index 00000000..41039dc0 --- /dev/null +++ b/tests/mockApiServer/routes/mockCluster.go @@ -0,0 +1,75 @@ +package routes + +import ( + "bytes" + v1 "github.com/spectrocloud/palette-sdk-go/api/client/v1" + "github.com/spectrocloud/palette-sdk-go/api/models" +) + +func ClusterRoutes() []Route { + var buffer bytes.Buffer + return []Route{ + { + Method: "POST", + Path: "/v1/dashboard/spectroclusters/search", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1SpectroClustersSummary{ + Items: []*models.V1SpectroClusterSummary{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-cluster", + UID: "test-cluster-id", + }, + SpecSummary: nil, + Status: nil, + }, + }, + Listmeta: nil, + }, + }, + }, + { + Method: "GET", + Path: "/v1/spectroclusters/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1SpectroCluster{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Name: "test-cluster", + UID: "test-cluster-id", + }, + 
Spec: nil, + Status: &models.V1SpectroClusterStatus{ + + State: "Running", + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/spectroclusters/{uid}/assets/kubeconfig", + Response: ResponseData{ + StatusCode: 200, + Payload: &v1.V1SpectroClustersUIDKubeConfigOK{ + ContentDisposition: "test-content", + Payload: &buffer, + }, + }, + }, + { + Method: "GET", + Path: "/v1/spectroclusters/{uid}/assets/adminKubeconfig", + Response: ResponseData{ + StatusCode: 200, + Payload: &v1.V1SpectroClustersUIDKubeConfigOK{ + ContentDisposition: "test-content", + Payload: &buffer, + }, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockClusterCommon.go b/tests/mockApiServer/routes/mockClusterCommon.go new file mode 100644 index 00000000..6c3b5ff1 --- /dev/null +++ b/tests/mockApiServer/routes/mockClusterCommon.go @@ -0,0 +1,228 @@ +package routes + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" +) + +func ClusterCommonRoutes() []Route { + + return []Route{ + { + Method: "POST", + Path: "/v1/spectroclusters/{uid}/upgrade/settings", + Response: ResponseData{ + StatusCode: 204, + Payload: map[string]string{"AuditUID": generateRandomStringUID()}, + }, + }, + { + Method: "POST", + Path: "/v1/appDeployments", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-application-id"}, + }, + }, + { + Method: "POST", + Path: "/v1/appDeployments/clusterGroup", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-application-id"}, + }, + }, + { + Method: "DELETE", + Path: "/v1/appDeployments/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/appDeployments/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1AppDeployment{ + Metadata: &models.V1ObjectMeta{ + Name: "test-app-deployment", + UID: "test-app-id", + Annotations: map[string]string{"skip_apps": "skip_apps"}, + Labels: 
map[string]string{"skip_apps": "skip_apps"}, + }, + Spec: &models.V1AppDeploymentSpec{ + Config: &models.V1AppDeploymentConfig{ + Target: &models.V1AppDeploymentTargetConfig{ + ClusterRef: &models.V1AppDeploymentClusterRef{ + DeploymentClusterType: "test", + Name: "test-cluster-ref", + UID: "test-clsuterref-uid", + }, + EnvRef: &models.V1AppDeploymentTargetEnvironmentRef{ + Name: "test-clsuterref-name", + Type: "test", + UID: "test-envref-id", + }, + }, + }, + Profile: &models.V1AppDeploymentProfile{ + Metadata: &models.V1AppDeploymentProfileMeta{ + Name: "test-app-profile", + UID: "test-app-profile-id", + Version: "1.0.0", + }, + Template: &models.V1AppProfileTemplate{ + AppTiers: []*models.V1AppTierRef{ + { + Name: "test-app-tier-name", + Type: "test", + UID: "test-app-id", + Version: "1.0.0", + }, + }, + RegistryRefs: []*models.V1ObjectReference{ + { + Kind: "test-template", + Name: "test-reg-ref-name", + UID: "test-reg-ref-id", + }, + }, + }, + }, + }, + Status: &models.V1AppDeploymentStatus{ + AppTiers: []*models.V1ClusterPackStatus{ + { + Condition: &models.V1ClusterCondition{ + LastProbeTime: models.V1Time{}, + LastTransitionTime: models.V1Time{}, + Message: "", + Reason: "", + Status: ptr.StringPtr("Ready"), + Type: nil, + }, + EndTime: models.V1Time{}, + Manifests: nil, + Name: "test-pack-a", + ProfileUID: "test-profile-uid", + Services: nil, + StartTime: models.V1Time{}, + Type: "test", + Version: "1.0.0", + }, + }, + LifecycleStatus: &models.V1LifecycleStatus{ + Msg: "test msg", + Status: "Deployed", + }, + State: "Deployed", + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/clustergroups/hostCluster", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1ClusterGroupsHostClusterSummary{ + Summaries: []*models.V1ClusterGroupSummary{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-cluster-group", + UID: generateRandomStringUID(), + }, + Spec: &models.V1ClusterGroupSummarySpec{ + Scope: "project", + }, + }, + }, + }, + }, + }, + { + 
Method: "GET", + Path: "/v1/clustergroups/hostCluster/metadata", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1ClusterGroupsHostClusterMetadata{ + Items: []*models.V1ObjectScopeEntity{ + { + Name: "test-cluster-group", + Scope: "system", + UID: generateRandomStringUID(), + }, + }, + }, + }, + }, + + { + Method: "GET", + Path: "/v1/overlords", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1Overlords{ + Items: []*models.V1Overlord{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-pcg-name", + UID: "test-pcg-id", + }, + }, + }, + }, + }, + }, + { + Method: "GET", + Path: "/v1/dashboard/workspaces", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1DashboardWorkspaces{ + Items: []*models.V1DashboardWorkspace{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-workspace", + UID: "test-workspace-uid", + }, + }, + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/dashboard/appProfiles", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1AppProfilesSummary{ + AppProfiles: []*models.V1AppProfileSummary{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-application-profile", + UID: "1.0.0", + }, + Spec: &models.V1AppProfileSummarySpec{ + Version: "1.0.0", + Versions: []*models.V1AppProfileVersion{ + { + UID: generateRandomStringUID(), + Version: "1.0.0", + }, + }, + }, + }, + }, + Listmeta: nil, + }, + }, + }, + } +} + +func ClusterCommonNegativeRoutes() []Route { + return []Route{} +} diff --git a/tests/mockApiServer/routes/mockClusterGroup.go b/tests/mockApiServer/routes/mockClusterGroup.go new file mode 100644 index 00000000..57decbb5 --- /dev/null +++ b/tests/mockApiServer/routes/mockClusterGroup.go @@ -0,0 +1,97 @@ +package routes + +import "github.com/spectrocloud/palette-sdk-go/api/models" + +func ClusterGroupRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/clustergroups", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-cg-1"}, + 
}, + }, + { + Method: "PUT", + Path: "/v1/clustergroups/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/clustergroups/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/clustergroups/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1ClusterGroup{ + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + Labels: map[string]string{ + "test": "dev", + }, + Name: "test-cg", + UID: "test-cg-1", + }, + Spec: &models.V1ClusterGroupSpec{ + ClusterProfileTemplates: []*models.V1ClusterProfileTemplate{ + { + CloudType: "aws", + Name: "temp1", + PackServerRefs: nil, + PackServerSecret: "test-secret", + Packs: nil, + ProfileVersion: "1.0.0", + RelatedObject: nil, + Type: "cluster", + UID: "test-uid", + Version: 0, + }, + }, + ClusterRefs: []*models.V1ClusterGroupClusterRef{ + { + ClusterName: "test-cluster", + ClusterUID: "test-cluster-id", + }, + }, + ClustersConfig: &models.V1ClusterGroupClustersConfig{ + EndpointType: "test-end", + HostClustersConfig: []*models.V1ClusterGroupHostClusterConfig{ + { + ClusterUID: "test-cluster-id", + EndpointConfig: &models.V1HostClusterEndpointConfig{ + IngressConfig: &models.V1IngressConfig{ + Host: "121.0.0.1", + Port: 1001, + }, + LoadBalancerConfig: &models.V1LoadBalancerConfig{ + ExternalIPs: []string{"0.0.0.0"}, + ExternalTrafficPolicy: "policy", + LoadBalancerSourceRanges: []string{"0.0.0.1"}, + }, + }, + }, + }, + KubernetesDistroType: "k8", + LimitConfig: nil, + Values: "test-values", + }, + Type: "", + }, + Status: &models.V1ClusterGroupStatus{ + IsActive: true, + }, + }, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockClusterProfile.go b/tests/mockApiServer/routes/mockClusterProfile.go new file mode 100644 index 00000000..ba4e55b4 --- /dev/null +++ b/tests/mockApiServer/routes/mockClusterProfile.go @@ -0,0 +1,201 @@ +package routes + +import ( + 
"github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" +) + +func getClusterProfilesMetadataResponse() *models.V1ClusterProfilesMetadata { + return &models.V1ClusterProfilesMetadata{ + Items: []*models.V1ClusterProfileMetadata{ + { + Metadata: &models.V1ObjectEntity{ + Name: "test-cluster-profile-1", + UID: generateRandomStringUID(), + }, + Spec: &models.V1ClusterProfileMetadataSpec{ + CloudType: "aws", + Version: "1.0.0", + }, + }, + { + Metadata: &models.V1ObjectEntity{ + Name: "test-cluster-profile-2", + UID: generateRandomStringUID(), + }, + Spec: &models.V1ClusterProfileMetadataSpec{ + CloudType: "gcp", + Version: "1.0.0", + }, + }, + }, + } +} + +func getClusterProfileResponse() *models.V1ClusterProfile { + return &models.V1ClusterProfile{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: map[string]string{ + "scope": "project", + }, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-cluster-profile-1", + UID: generateRandomStringUID(), + }, + Spec: &models.V1ClusterProfileSpec{ + Draft: nil, + Published: &models.V1ClusterProfileTemplate{ + CloudType: "aws", + Name: "test-cluster-profile-1", + PackServerRefs: nil, + PackServerSecret: "", + Packs: []*models.V1PackRef{ + { + Name: ptr.StringPtr("k8"), + PackUID: generateRandomStringUID(), + RegistryUID: generateRandomStringUID(), + Schema: nil, + Values: "{test-json:test}", + Version: "1.0.0", + }, + }, + ProfileVersion: "1.0.0", + RelatedObject: nil, + Type: "cluster", + UID: generateRandomStringUID(), + Version: 0, + }, + Version: "1.0.0", + Versions: nil, + }, + Status: &models.V1ClusterProfileStatus{ + HasUserMacros: false, + InUseClusters: nil, + IsPublished: true, + }, + } +} + +func getClusterProfilePackManifestResponse() *models.V1ManifestEntities { + return &models.V1ManifestEntities{ + Items: []*models.V1ManifestEntity{ + { + 
Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-manifest-1", + UID: generateRandomStringUID(), + }, + Spec: &models.V1ManifestSpec{ + Draft: &models.V1ManifestData{ + Content: "test-content", + Digest: "test-digest", + }, + Published: &models.V1ManifestData{ + Content: "test-content", + Digest: "test-digest", + }, + }, + }, + }, + } +} + +func ClusterProfileRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/clusterprofiles/import/file", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "cluster-profile-import-1"}, + }, + }, + { + Method: "GET", + Path: "/v1/clusterprofiles/{uid}/variables", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1Variables{}, + }, + }, + { + Method: "DELETE", + Path: "/v1/clusterprofiles/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "POST", + Path: "/v1/clusterprofiles", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "cluster-profile-1"}, + }, + }, + { + Method: "PATCH", + Path: "/v1/clusterprofiles/{uid}/publish", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/dashboard/clusterprofiles/metadata", + Response: ResponseData{ + StatusCode: 200, + Payload: getClusterProfilesMetadataResponse(), + }, + }, + { + Method: "GET", + Path: "/v1/clusterprofiles/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: getClusterProfileResponse(), + }, + }, + { + Method: "GET", + Path: "/v1/clusterprofiles/{uid}/packs/{packName}/manifests", + Response: ResponseData{ + StatusCode: 200, + Payload: getClusterProfilePackManifestResponse(), + }, + }, + } +} + +func ClusterProfileNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/dashboard/clusterprofiles/metadata", + 
Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1ClusterProfilesMetadata{}, + }, + }, + { + Method: "GET", + Path: "/v1/clusterprofiles/{uid}", + Response: ResponseData{ + StatusCode: http.StatusLocked, + Payload: nil, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockFilter.go b/tests/mockApiServer/routes/mockFilter.go new file mode 100644 index 00000000..e760934c --- /dev/null +++ b/tests/mockApiServer/routes/mockFilter.go @@ -0,0 +1,136 @@ +package routes + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" + "strconv" +) + +func getFiltersResponse() models.V1FiltersSummary { + return models.V1FiltersSummary{ + Items: []*models.V1FilterSummary{ + { + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-filter-1", + UID: generateRandomStringUID(), + }, + Spec: &models.V1FilterSummarySpec{ + FilterType: "test", + }, + }, + { + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-filter-2", + UID: generateRandomStringUID(), + }, + Spec: &models.V1FilterSummarySpec{ + FilterType: "test", + }, + }, + }, + Listmeta: nil, + } +} + +func getFilterSummary() *models.V1TagFilterSummary { + return &models.V1TagFilterSummary{ + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-filter-2", + UID: generateRandomStringUID(), + }, + Spec: &models.V1TagFilterSpec{ + FilterGroup: &models.V1TagFilterGroup{ + Conjunction: (*models.V1SearchFilterConjunctionOperator)(ptr.StringPtr("and")), + Filters: []*models.V1TagFilterItem{ + { + Key: "name", + Negation: 
false, + Operator: "", + Values: []string{"test"}, + }, + }, + }, + }, + } +} +func FilterRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/filters", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getFiltersResponse(), + }, + }, + { + Method: "POST", + Path: "/v1/filters/tag", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-filter-id"}, + }, + }, + { + Method: "PUT", + Path: "/v1/filters/tag/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/filters/tag/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/filters/tag/{uid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getFilterSummary(), + }, + }, + } +} + +func FilterNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/filters", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusOK), "filter not found"), + }, + }, + { + Method: "GET", + Path: "/v1/filters/tag/{uid}", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "filter not found"), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockIPpool.go b/tests/mockApiServer/routes/mockIPpool.go new file mode 100644 index 00000000..c0c1c2b1 --- /dev/null +++ b/tests/mockApiServer/routes/mockIPpool.go @@ -0,0 +1,69 @@ +package routes + +import "github.com/spectrocloud/palette-sdk-go/api/models" + +func IPPoolRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/overlords/vsphere/{uid}/pools", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-pcg-id"}, + }, + }, + { + Method: "PUT", + Path: "/v1/overlords/vsphere/{uid}/pools/{poolUid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: 
"/v1/overlords/vsphere/{uid}/pools/{poolUid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/overlords/vsphere/{uid}/pools", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1IPPools{ + Items: []*models.V1IPPoolEntity{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-name", + UID: "test-pcg-id", + }, + Spec: &models.V1IPPoolEntitySpec{ + Pool: &models.V1Pool{ + End: "test-end", + Gateway: "test-gateway", + Nameserver: &models.V1Nameserver{ + Addresses: []string{"test-address"}, + Search: []string{"test-search"}, + }, + Prefix: 0, + Start: "teat-start", + Subnet: "test-subnet", + }, + PriavetGatewayUID: "test-pcg-id", + RestrictToSingleCluster: false, + }, + Status: &models.V1IPPoolStatus{ + AllottedIps: nil, + AssociatedClusters: nil, + InUse: false, + }, + }, + }, + }, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockMacros.go b/tests/mockApiServer/routes/mockMacros.go new file mode 100644 index 00000000..5565a73f --- /dev/null +++ b/tests/mockApiServer/routes/mockMacros.go @@ -0,0 +1,169 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" + "strconv" +) + +//func getMockMacroPayload() models.V1Macro { +// return models.V1Macro{ +// Name: "SampleMacro", +// Value: "SampleValue", +// } +//} + +func getMockMacrosPayload() *models.V1Macros { + return &models.V1Macros{ + Macros: []*models.V1Macro{ + { + Name: "macro1", + Value: "value1", + }, + { + Name: "macro2", + Value: "value2", + }, + }, + } +} + +func MacrosRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: map[string]interface{}{"UID": generateRandomStringUID()}, + }, + }, + + { + Method: "POST", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: nil, + }, + }, + { + Method: "GET", + Path: 
"/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getMockMacrosPayload(), + }, + }, + { + Method: "GET", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getMockMacrosPayload(), + }, + }, + { + Method: "PUT", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: map[string]interface{}{"UID": generateRandomStringUID()}, + }, + }, + { + Method: "PUT", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: map[string]interface{}{"UID": generateRandomStringUID()}, + }, + }, + { + Method: "DELETE", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: nil, + }, + }, + } +} + +func MacrosNegativeRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusConflict, + Payload: getError(strconv.Itoa(http.StatusConflict), "Macro already exists"), + }, + }, + { + Method: "GET", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusOK), "Macro not found"), + }, + }, + { + Method: "PUT", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusMethodNotAllowed, + Payload: getError(strconv.Itoa(http.StatusNoContent), "Operation not allowed"), + }, + }, + { + Method: "DELETE", + Path: "/v1/projects/{uid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusOK), "Macro not found"), + }, + }, + // for tenant + { + Method: "POST", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + 
StatusCode: http.StatusConflict, + Payload: getError(strconv.Itoa(http.StatusConflict), "Macro already exists"), + }, + }, + { + Method: "GET", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusOK), "Macro not found"), + }, + }, + { + Method: "PUT", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusMethodNotAllowed, + Payload: getError(strconv.Itoa(http.StatusNoContent), "Operation not allowed"), + }, + }, + { + Method: "DELETE", + Path: "/v1/tenants/{tenantUid}/macros", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusOK), "Macro not found"), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockPacks.go b/tests/mockApiServer/routes/mockPacks.go new file mode 100644 index 00000000..8f3cad8a --- /dev/null +++ b/tests/mockApiServer/routes/mockPacks.go @@ -0,0 +1,201 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" +) + +func getPackSummaryPayload() *models.V1PackSummaries { + return &models.V1PackSummaries{ + Items: []*models.V1PackSummary{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "k8", + UID: generateRandomStringUID(), + }, + Spec: &models.V1PackSummarySpec{ + CloudTypes: []string{"aws"}, + AddonType: "infra", + Name: "k8", + RegistryUID: "test-reg-uid", + Type: "helm", + Values: "test-test", + Version: "1.0", + }, + Status: nil, + }, + }, + Listmeta: nil, + } +} + +func getPackSummaryPayloadWithMultiPacks() *models.V1PackSummaries { + return &models.V1PackSummaries{ + Items: []*models.V1PackSummary{ + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + 
DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "k8", + UID: generateRandomStringUID(), + }, + Spec: &models.V1PackSummarySpec{ + CloudTypes: []string{"aws"}, + AddonType: "infra", + Name: "k8", + RegistryUID: "test-reg-uid", + Type: "helm", + Values: "test-test", + Version: "1.0", + }, + Status: nil, + }, + { + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "cni", + UID: generateRandomStringUID(), + }, + Spec: &models.V1PackSummarySpec{ + CloudTypes: []string{"aws"}, + AddonType: "infra", + Name: "cni", + RegistryUID: "test-reg-uid", + Type: "helm", + Values: "test-test", + Version: "1.0", + }, + Status: nil, + }, + }, + Listmeta: nil, + } +} + +func getPacksNameRegistryUIDNegative() *models.V1PackTagEntity { + return &models.V1PackTagEntity{ + AddonSubType: "", + AddonType: "infra", + CloudTypes: []string{"aws", "eks"}, + DisplayName: "k8", + Layer: "", + LogoURL: "", + Name: "k8", + PackValues: []*models.V1PackUIDValues{ + { + Annotations: nil, + Dependencies: nil, + PackUID: generateRandomStringUID(), + Presets: nil, + Readme: "", + Schema: nil, + Template: nil, + Values: "test-test", + }, + }, + RegistryUID: generateRandomStringUID(), + Tags: []*models.V1PackTags{ + { + Group: "dev", + PackUID: generateRandomStringUID(), + ParentTags: nil, + Tag: "unit-test", + Version: "1.0", + }, + }, + } +} + +func getPacksNameRegistryUID() *models.V1PackTagEntity { + return &models.V1PackTagEntity{ + AddonSubType: "", + AddonType: "infra", + CloudTypes: []string{"aws", "eks"}, + DisplayName: "k8", + Layer: "", + LogoURL: "", + Name: "k8", + PackValues: []*models.V1PackUIDValues{ + { + Annotations: nil, + Dependencies: nil, + PackUID: "test-pack-uid", + Presets: nil, + Readme: "", + Schema: nil, + Template: nil, + Values: "test-test", + }, + }, + 
RegistryUID: generateRandomStringUID(), + Tags: []*models.V1PackTags{ + { + Group: "dev", + PackUID: "test-pack-uid", + ParentTags: nil, + Tag: "unit-test", + Version: "1.0", + }, + }, + } +} + +func PacksRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/packs", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getPackSummaryPayload(), + }, + }, + { + Method: "GET", + Path: "/v1/packs/{packName}/registries/{registryUid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getPacksNameRegistryUID(), + }, + }, + } +} + +func PacksNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/packs", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getPackSummaryPayloadWithMultiPacks(), + }, + }, + { + Method: "GET", + Path: "/v1/packs/{packName}/registries/{registryUid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getPacksNameRegistryUIDNegative(), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockProjects.go b/tests/mockApiServer/routes/mockProjects.go new file mode 100644 index 00000000..ffe4e2ec --- /dev/null +++ b/tests/mockApiServer/routes/mockProjects.go @@ -0,0 +1,159 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" + "strconv" +) + +func getMockProjectPayload() models.V1Project { + return models.V1Project{ + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: map[string]string{ + "description": "default project", + }, + LastModifiedTimestamp: models.V1Time{}, + Name: "Default", + UID: generateRandomStringUID(), + }, + Spec: &models.V1ProjectSpec{ + Alerts: nil, + LogoURL: "", + Teams: nil, + Users: nil, + }, + Status: &models.V1ProjectStatus{ + CleanUpStatus: nil, + IsDisabled: false, + }, + } + +} + +func ProjectRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/projects", + Response: 
ResponseData{ + StatusCode: http.StatusCreated, + Payload: map[string]interface{}{"UID": generateRandomStringUID()}, + }, + }, + { + Method: "GET", + Path: "/v1/projects/{uid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getMockProjectPayload(), + }, + }, + { + Method: "GET", + Path: "/v1/dashboard/projects/metadata", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: models.V1ProjectsMetadata{ + Items: []*models.V1ProjectMetadata{ + { + Metadata: &models.V1ObjectEntity{ + Name: "Default", + UID: generateRandomStringUID(), + }, + }, + }, + }, + }, + }, + { + Method: "PUT", + Path: "/v1/projects/{uid}", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: map[string]interface{}{"UID": generateRandomStringUID()}, + }, + }, + { + Method: "DELETE", + Path: "/v1/projects/{uid}", + Response: ResponseData{ + StatusCode: http.StatusNoContent, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/dashboard/projects/metadata", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: &models.V1ProjectsMetadata{ + Items: []*models.V1ProjectMetadata{ + { + Metadata: &models.V1ObjectEntity{ + Name: "Default", + UID: generateRandomStringUID(), + }, + }, + }, + }, + }, + }, + } +} + +func ProjectNegativeRoutes() []Route { + return []Route{ + { + Method: "POST", + Path: "/v1/projects", + Response: ResponseData{ + StatusCode: http.StatusConflict, + Payload: getError(strconv.Itoa(http.StatusConflict), "Project already exist"), + }, + }, + { + Method: "GET", + Path: "/v1/projects/{uid}", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "Project not found"), + }, + }, + { + Method: "PUT", + Path: "/v1/projects/{uid}", + Response: ResponseData{ + StatusCode: http.StatusMethodNotAllowed, + Payload: getError(strconv.Itoa(http.StatusMethodNotAllowed), "Operation not allowed"), + }, + }, + { + Method: "DELETE", + Path: "/v1/projects/{uid}", + Response: 
ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "Project not found"), + }, + }, + { + Method: "GET", + Path: "/v1/dashboard/projects/metadata", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: models.V1ProjectsMetadata{ + Items: []*models.V1ProjectMetadata{ + { + Metadata: &models.V1ObjectEntity{ + Name: "Default", + UID: generateRandomStringUID(), + }, + }, + }, + }, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockRegistries.go b/tests/mockApiServer/routes/mockRegistries.go new file mode 100644 index 00000000..ccea231d --- /dev/null +++ b/tests/mockApiServer/routes/mockRegistries.go @@ -0,0 +1,213 @@ +package routes + +import ( + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" +) + +func getHelmRegistryPayload() *models.V1HelmRegistry { + return &models.V1HelmRegistry{ + APIVersion: "", + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "Public", + UID: generateRandomStringUID(), + }, + Spec: &models.V1HelmRegistrySpec{ + Auth: &models.V1RegistryAuth{ + Password: "test=pwd", + TLS: nil, + Token: "as", + Type: "token", + Username: "sf", + }, + Endpoint: ptr.StringPtr("test.com"), + IsPrivate: false, + Name: "Public", + RegistryUID: generateRandomStringUID(), + Scope: "project", + }, + Status: &models.V1HelmRegistryStatus{ + HelmSyncStatus: &models.V1RegistrySyncStatus{ + LastRunTime: models.V1Time{}, + LastSyncedTime: models.V1Time{}, + Message: "", + Status: "Active", + }, + }, + } +} + +func RegistriesRoutes() []Route { + return []Route{ + { + Method: "PUT", + Path: "/v1/registries/oci/{uid}/ecr", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "DELETE", + Path: "/v1/registries/oci/{uid}/ecr", + Response: ResponseData{ + StatusCode: 
204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/registries/oci/{uid}/ecr", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1EcrRegistry{ + Kind: "", + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "testSecretRegistry", + UID: "testSecretRegistry-id", + }, + Spec: &models.V1EcrRegistrySpec{ + BaseContentPath: "test-path", + Credentials: &models.V1AwsCloudAccount{ + AccessKey: "test-key", + CredentialType: "sts", + Partition: ptr.StringPtr("test-part"), + PolicyARNs: []string{"test-arns"}, + SecretKey: "test-secret-key", + Sts: &models.V1AwsStsCredentials{ + Arn: "test-arn", + ExternalID: "test-external-id", + }, + }, + DefaultRegion: "test-region", + Endpoint: ptr.StringPtr("test.point"), + IsPrivate: ptr.BoolPtr(false), + ProviderType: ptr.StringPtr("test-type"), + RegistryUID: "test-reg-uid", + Scope: "project", + TLS: &models.V1TLSConfiguration{ + Ca: "test-ca", + Certificate: "test-cert", + Enabled: false, + InsecureSkipVerify: false, + Key: "test-key", + }, + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/registries/oci/ecr", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": "test-sts-oci-reg-ecr-uid"}, + }, + }, + { + Method: "GET", + Path: "/v1/registries/oci/summary", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: &models.V1OciRegistries{ + Items: []*models.V1OciRegistry{ + { + Metadata: &models.V1ObjectMeta{ + Name: "test-registry-oci", + UID: generateRandomStringUID(), + }, + Spec: nil, + Status: nil, + }, + }, + }, + }, + }, + { + Method: "POST", + Path: "/v1/registries/helm", + Response: ResponseData{ + StatusCode: 201, + Payload: map[string]string{"UID": generateRandomStringUID()}, + }, + }, + { + Method: "DELETE", + Path: "/v1/registries/helm/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, 
+ { + Method: "PUT", + Path: "/v1/registries/helm/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/registries/helm", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: &models.V1HelmRegistries{ + Items: []*models.V1HelmRegistry{getHelmRegistryPayload()}, + }, + }, + }, + { + Method: "GET", + Path: "/v1/registries/helm/{uid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getHelmRegistryPayload(), + }, + }, + { + Method: "GET", + Path: "/v1/registries/metadata", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1RegistriesMetadata{ + Items: []*models.V1RegistryMetadata{ + { + IsDefault: false, + IsPrivate: false, + Kind: "", + Name: "test-registry-name", + Scope: "project", + UID: "test-registry-uid", + }, + }, + }, + }, + }, + } +} + +func RegistriesNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/registries/helm/{uid}", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getHelmRegistryPayload(), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockRole.go b/tests/mockApiServer/routes/mockRole.go new file mode 100644 index 00000000..c44500f7 --- /dev/null +++ b/tests/mockApiServer/routes/mockRole.go @@ -0,0 +1,65 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" + "strconv" +) + +func getRolesList() *models.V1Roles { + return &models.V1Roles{ + Items: []*models.V1Role{ + { + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-role", + UID: generateRandomStringUID(), + }, + Spec: &models.V1RoleSpec{ + Permissions: []string{"perm1", "perm2"}, + Scope: "project", + Type: "", + }, + Status: &models.V1RoleStatus{ + IsEnabled: true, + }, + }, + }, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 0, + 
Limit: 0, + Offset: 0, + }, + } +} + +func RolesRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/roles", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getRolesList(), + }, + }, + } +} + +func RolesNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/roles", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + Payload: getError(strconv.Itoa(http.StatusNotFound), "No roles are found"), + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockTeam.go b/tests/mockApiServer/routes/mockTeam.go new file mode 100644 index 00000000..a8eb42b9 --- /dev/null +++ b/tests/mockApiServer/routes/mockTeam.go @@ -0,0 +1,143 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" +) + +func TeamRoutes() []Route { + return []Route{ + { + Method: "DELETE", + Path: "/v1/teams/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "PUT", + Path: "/v1/teams/{uid}", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/teams/{uid}", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1Team{ + Metadata: &models.V1ObjectMeta{ + Name: "team-name", + UID: "team-123", + }, + Spec: &models.V1TeamSpec{ + Roles: []string{"role1"}, + Sources: []string{"source1"}, + Users: []string{"user1"}, + }, + Status: nil, + }, + }, + }, + { + Method: "POST", + Path: "/v1/teams", + Response: ResponseData{ + StatusCode: http.StatusCreated, + Payload: map[string]interface{}{"UID": "team-123"}, + }, + }, + { + Method: "GET", + Path: "/v1/teams/{uid}/projects", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1ProjectRolesEntity{ + Projects: []*models.V1UIDRoleSummary{ + { + InheritedRoles: nil, + Name: "testadmin", + Roles: []*models.V1UIDSummary{ + { + Name: "test-role", + UID: "test-role-123", + }, + }, + UID: "test-role-sum-id", + }, + }, + }, + }, + }, + { + Method: "PUT", 
+ Path: "/v1/teams/{uid}/projects", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/teams/{uid}/roles", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1TeamTenantRolesEntity{ + Roles: []*models.V1UIDSummary{ + { + Name: "test-tenant-name", + UID: "test-tenant-id", + }, + }, + }, + }, + }, + { + Method: "PUT", + Path: "/v1/teams/{uid}/roles", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + { + Method: "GET", + Path: "/v1/workspaces/teams/{teamUid}/roles", + Response: ResponseData{ + StatusCode: 200, + Payload: &models.V1WorkspaceScopeRoles{ + Projects: []*models.V1ProjectsWorkspaces{ + { + Name: "test-pjt-wp", + UID: "test-id1", + Workspaces: []*models.V1WorkspacesRoles{ + { + InheritedRoles: nil, + Name: "test-ws-name", + Roles: []*models.V1WorkspaceRolesUIDSummary{ + { + Name: "test-es-role-name", + UID: "test-id2", + }, + }, + UID: "test-id3", + }, + }, + }, + }, + }, + }, + }, + { + Method: "PUT", + Path: "/v1/workspaces/teams/{teamUid}/roles", + Response: ResponseData{ + StatusCode: 204, + Payload: nil, + }, + }, + } +} diff --git a/tests/mockApiServer/routes/mockTenant.go b/tests/mockApiServer/routes/mockTenant.go new file mode 100644 index 00000000..09a95b2a --- /dev/null +++ b/tests/mockApiServer/routes/mockTenant.go @@ -0,0 +1,38 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" +) + +func TenantRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/users/info", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: models.V1UserInfo{ + OrgName: "Default", + TenantUID: generateRandomStringUID(), + UserUID: generateRandomStringUID(), + }, + }, + }} +} + +func TenantNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/users/info", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: models.V1UserInfo{ + OrgName: "Default", + TenantUID: 
generateRandomStringUID(), + UserUID: generateRandomStringUID(), + }, + }, + }} +} diff --git a/tests/mockApiServer/routes/mockUsers.go b/tests/mockApiServer/routes/mockUsers.go new file mode 100644 index 00000000..e8ce0e7c --- /dev/null +++ b/tests/mockApiServer/routes/mockUsers.go @@ -0,0 +1,92 @@ +package routes + +import ( + "github.com/spectrocloud/palette-sdk-go/api/models" + "net/http" + "strconv" +) + +func getUsersResponse() models.V1Users { + return models.V1Users{ + Items: []*models.V1User{ + { + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test", + UID: "12345", + }, + Spec: &models.V1UserSpec{ + EmailID: "test@spectrocloud.com", + FirstName: "test", + LastName: "spectro", + Roles: nil, + }, + Status: &models.V1UserStatus{ + ActivationLink: "", + IsActive: true, + IsPasswordResetting: false, + LastSignIn: models.V1Time{}, + }, + }, + { + Metadata: &models.V1ObjectMeta{ + Annotations: nil, + CreationTimestamp: models.V1Time{}, + DeletionTimestamp: models.V1Time{}, + Labels: nil, + LastModifiedTimestamp: models.V1Time{}, + Name: "test-user2", + UID: "test-user-12345", + }, + Spec: &models.V1UserSpec{ + EmailID: "test-user2@spectrocloud.com", + FirstName: "test-user2", + LastName: "spectro", + Roles: nil, + }, + Status: &models.V1UserStatus{ + ActivationLink: "", + IsActive: true, + IsPasswordResetting: false, + LastSignIn: models.V1Time{}, + }, + }, + }, + Listmeta: &models.V1ListMetaData{ + Continue: "", + Count: 2, + Limit: 10, + Offset: 0, + }, + } +} + +func UserRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/users", + Response: ResponseData{ + StatusCode: http.StatusOK, + Payload: getUsersResponse(), + }, + }, + } +} + +func UserNegativeRoutes() []Route { + return []Route{ + { + Method: "GET", + Path: "/v1/users", + Response: ResponseData{ + StatusCode: http.StatusNotFound, + 
Payload: getError(strconv.Itoa(http.StatusOK), "User not found"),
+			},
+		},
+	}
+}
diff --git a/tests/mockApiServer/routes/mockWorkSpace.go b/tests/mockApiServer/routes/mockWorkSpace.go
new file mode 100644
index 00000000..91afb5c9
--- /dev/null
+++ b/tests/mockApiServer/routes/mockWorkSpace.go
@@ -0,0 +1,127 @@
+package routes
+
+import (
+	"github.com/spectrocloud/palette-sdk-go/api/models"
+)
+
+// WorkSpaceRoutes returns the mocked /v1/workspaces create, delete and get routes served by the test API server.
+func WorkSpaceRoutes() []Route {
+	return []Route{
+		{
+			Method: "POST",
+			Path:   "/v1/workspaces",
+			Response: ResponseData{
+				StatusCode: 201,
+				Payload:    map[string]string{"UID": "test-ws-1"},
+			},
+		},
+		{
+			Method: "DELETE",
+			Path:   "/v1/workspaces/{uid}",
+			Response: ResponseData{
+				StatusCode: 204,
+				Payload:    nil,
+			},
+		},
+		{
+			Method: "GET",
+			Path:   "/v1/workspaces/{uid}",
+			Response: ResponseData{
+				StatusCode: 200, // was 0 — not a valid HTTP status; net/http panics on WriteHeader codes outside 1xx-5xx
+				Payload: &models.V1Workspace{
+					Metadata: &models.V1ObjectMeta{
+						Annotations: nil,
+						Labels:      nil,
+						Name:        "test-ws-1",
+						UID:         "test-ws-1-id",
+					},
+					Spec: &models.V1WorkspaceSpec{
+						ClusterNamespaces: []*models.V1WorkspaceClusterNamespace{
+							{
+								Image: &models.V1WorkspaceNamespaceImage{
+									BlackListedImages: []string{"image1"},
+								},
+								IsRegex: false,
+								Name:    "test-ws-ns",
+								NamespaceResourceAllocation: &models.V1WorkspaceNamespaceResourceAllocation{
+									ClusterResourceAllocations: []*models.V1ClusterResourceAllocation{
+										{
+											ClusterUID: "test-cluster-uid",
+											ResourceAllocation: &models.V1WorkspaceResourceAllocation{
+												CPUCores:  2,
+												MemoryMiB: 100,
+											},
+										},
+									},
+									DefaultResourceAllocation: &models.V1WorkspaceResourceAllocation{
+										CPUCores:  2,
+										MemoryMiB: 100,
+									},
+								},
+							},
+						},
+						ClusterRbacs: []*models.V1ClusterRbac{
+							{
+								Metadata: &models.V1ObjectMeta{
+									Name: "test-rbac-name",
+									UID:  "test-rbac-id",
+								},
+								Spec: &models.V1ClusterRbacSpec{
+									Bindings: []*models.V1ClusterRbacBinding{
+										{
+											Namespace: "test-ns",
+											Role:      nil,
+											Subjects:  nil,
+											Type:      "ns",
+										},
+									},
+									RelatedObject: &models.V1RelatedObject{
+										Kind: "test",
+										Name: "test-ro",
+										UID:  "test-ro-id",
+									},
+								},
+								Status: &models.V1ClusterRbacStatus{
+									Errors: nil,
+								},
+							},
+						},
+						ClusterRefs: []*models.V1WorkspaceClusterRef{
+							{
+								ClusterName: "test-cluster-name",
+								ClusterUID:  "test-cluster-id",
+							},
+						},
+						Policies: &models.V1WorkspacePolicies{
+							BackupPolicy: &models.V1WorkspaceBackupConfigEntity{
+								BackupConfig: &models.V1ClusterBackupConfig{
+									BackupLocationName:      "test-bl",
+									BackupLocationUID:       "uid",
+									BackupName:              "test-back-name",
+									BackupPrefix:            "prefix",
+									DurationInHours:         0,
+									IncludeAllDisks:         false,
+									IncludeClusterResources: false,
+									LocationType:            "test-location",
+									Namespaces:              nil,
+									Schedule:                nil,
+								},
+								ClusterUids:        []string{"c-uid"},
+								IncludeAllClusters: false,
+							},
+						},
+						Quota: &models.V1WorkspaceQuota{
+							ResourceAllocation: &models.V1WorkspaceResourceAllocation{
+								CPUCores:  2,
+								MemoryMiB: 100,
+							},
+						},
+					},
+					Status: &models.V1WorkspaceStatus{
+						Errors: nil,
+					},
+				},
+			},
+		},
+	}
+}
diff --git a/tests/mockApiServer/start_mock_api_server.sh b/tests/mockApiServer/start_mock_api_server.sh
new file mode 100755
index 00000000..dc068d17
--- /dev/null
+++ b/tests/mockApiServer/start_mock_api_server.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+export MOCK_SERVER_PATH="$TF_SRC/tests/mockApiServer"
+
+# Navigate to the mock API server directory (quoted: path may contain spaces)
+cd "$MOCK_SERVER_PATH" || exit
+
+# Generate the private key
+openssl genpkey -algorithm RSA -out mock_server.key -pkeyopt rsa_keygen_bits:2048
+
+# Generate the self-signed certificate with default input
+openssl req -new -x509 -key mock_server.key -out mock_server.crt -days 365 -subj "/C=US/ST=CA/L=City/O=Organization/OU=Department/CN=localhost"
+
+# Build the Go project
+go build -o MockBuild apiServerMock.go
+
+# Run the server in the background and redirect output to server.log
+nohup ./MockBuild > mock_api_server.log 2>&1 &
\ No newline at end of file
diff --git a/tests/mockApiServer/stop_mock_api_server.sh b/tests/mockApiServer/stop_mock_api_server.sh
new file mode 100755
index 00000000..39672f73
--- /dev/null
+++ b/tests/mockApiServer/stop_mock_api_server.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Find the process ID(s) of the running mock API server binary (MockBuild)
+PID=$(pgrep -f MockBuild)
+
+if [ -z "$PID" ]; then
+  echo "MockAPIServer is not running."
+else
+  # Kill the process; $PID is intentionally unquoted so multiple PIDs word-split into separate kill arguments
+  kill $PID
+  echo "MockAPIServer (PID: $PID) has been stopped."
+fi
+[ -f "./MockBuild" ] && rm -f "./MockBuild"