diff --git a/go.mod b/go.mod index 54a98503..45a05788 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/hashicorp/go-retryablehttp v0.6.6 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 github.com/selectel/craas-go v0.3.0 - github.com/selectel/dbaas-go v0.9.0 + github.com/selectel/dbaas-go v0.10.0 github.com/selectel/domains-go v0.5.0 github.com/selectel/go-selvpcclient/v3 v3.1.1 github.com/selectel/mks-go v0.12.0 diff --git a/go.sum b/go.sum index 2647168d..4d94d03e 100644 --- a/go.sum +++ b/go.sum @@ -151,8 +151,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/selectel/craas-go v0.3.0 h1:tXiw3LNN+ZVV0wZdeBBXX6u8kMuA5PV/5W1uYqV0yXg= github.com/selectel/craas-go v0.3.0/go.mod h1:9RAUn9PdMITP4I3GAade6v2hjB2j3lo3J2dDlG5SLYE= -github.com/selectel/dbaas-go v0.9.0 h1:IAmiyxkRtfLZg1JUdIhcsE5jpdBvsZibPCqyhB+yV30= -github.com/selectel/dbaas-go v0.9.0/go.mod h1:8D945oFzpx94v08zIb4s1bRTPCdPoNVnBu4umMYFJrQ= +github.com/selectel/dbaas-go v0.10.0 h1:iY2Q7PY9ICoWBDtni+6oWGR2uAWodER0K2zchNLIOl4= +github.com/selectel/dbaas-go v0.10.0/go.mod h1:uyPhqmcvdmKBt9yWhogoSQgWkcZ9QgVlbgaERdSdAfk= github.com/selectel/domains-go v0.5.0 h1:RCrWY/9KHVtfdA+X8M+DDzsjILxFChhY70HnJEu1Y2U= github.com/selectel/domains-go v0.5.0/go.mod h1:AhXhwyMSTkpEWFiBLUvzFP76W+WN+ZblwmjLJLt7y58= github.com/selectel/go-selvpcclient/v3 v3.1.1 h1:C1q2LqqosiapoLpnGITGmysg0YCSQYDo2Gh69CioevM= diff --git a/selectel/dbaas.go b/selectel/dbaas.go index a7048c25..9851d3c9 100644 --- a/selectel/dbaas.go +++ b/selectel/dbaas.go @@ -24,6 +24,7 @@ const ( mySQLDatastoreType = "mysql" mySQLNativeDatastoreType = "mysql_native" redisDatastoreType = "redis" + kafkaDatastoreType = "kafka" ) func getDBaaSClient(d *schema.ResourceData, meta interface{}) (*dbaas.API, diag.Diagnostics) { @@ -122,7 +123,7 @@ func waitForDBaaSDatastoreV1ActiveState( Refresh: dbaasDatastoreV1StateRefreshFunc(ctx, client, datastoreID), Timeout: timeout, Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + MinTimeout: 20 * time.Second, } _, err := stateConf.WaitForState() @@ -610,3 +611,121 @@ func dbaasLogicalReplicationSlotV1DeleteStateRefreshFunc(ctx context.Context, cl return d, strconv.Itoa(http.StatusOK), err } } + +// Topics + +func waitForDBaaSTopicV1ActiveState( + ctx context.Context, client *dbaas.API, topicID string, timeout time.Duration, +) error { + pending := []string{ + string(dbaas.StatusPendingCreate), + string(dbaas.StatusPendingUpdate), + } + target := []string{ + string(dbaas.StatusActive), + } + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Refresh: dbaasTopicV1StateRefreshFunc(ctx, client, topicID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 20 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "error waiting for the topic %s to become 'ACTIVE': %s", + topicID, err) + } + + return nil +} + +func dbaasTopicV1StateRefreshFunc(ctx context.Context, client *dbaas.API, topicID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + d, err := client.Topic(ctx, topicID) + if err != nil { + return nil, "", err + } + + return d, string(d.Status), nil + } +} + +func dbaasTopicV1DeleteStateRefreshFunc(ctx context.Context, client *dbaas.API, topicID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + d, err := client.Topic(ctx, topicID) + if err != 
nil { + var dbaasError *dbaas.DBaaSAPIError + if errors.As(err, &dbaasError) { + return d, strconv.Itoa(dbaasError.StatusCode()), nil + } + + return nil, "", err + } + + return d, strconv.Itoa(http.StatusOK), err + } +} + +// ACLs + +func waitForDBaaSACLV1ActiveState( + ctx context.Context, client *dbaas.API, aclID string, timeout time.Duration, +) error { + pending := []string{ + string(dbaas.StatusPendingCreate), + string(dbaas.StatusPendingUpdate), + } + target := []string{ + string(dbaas.StatusActive), + } + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Refresh: dbaasACLV1StateRefreshFunc(ctx, client, aclID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 15 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "error waiting for the acl %s to become 'ACTIVE': %s", + aclID, err) + } + + return nil +} + +func dbaasACLV1StateRefreshFunc(ctx context.Context, client *dbaas.API, aclID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + d, err := client.ACL(ctx, aclID) + if err != nil { + return nil, "", err + } + + return d, string(d.Status), nil + } +} + +func dbaasACLV1DeleteStateRefreshFunc(ctx context.Context, client *dbaas.API, aclID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + d, err := client.ACL(ctx, aclID) + if err != nil { + var dbaasError *dbaas.DBaaSAPIError + if errors.As(err, &dbaasError) { + return d, strconv.Itoa(dbaasError.StatusCode()), nil + } + + return nil, "", err + } + + return d, strconv.Itoa(http.StatusOK), err + } +} diff --git a/selectel/provider.go b/selectel/provider.go index 053e2e3c..ab346f9e 100644 --- a/selectel/provider.go +++ b/selectel/provider.go @@ -9,6 +9,7 @@ import ( ) const ( + objectACL = "acl" objectFloatingIP = "floating IP" objectKeypair = "keypair" objectLicense = "license" @@ -17,6 +18,7 @@ const ( objectRole = "role" objectSubnet = "subnet" objectToken = "token" + objectTopic = "topic" objectUser = "user" objectCluster = "cluster" objectKubeConfig = "kubeconfig" @@ -136,6 +138,9 @@ func Provider() *schema.Provider { "selectel_dbaas_postgresql_extension_v1": resourceDBaaSPostgreSQLExtensionV1(), "selectel_dbaas_prometheus_metric_token_v1": resourceDBaaSPrometheusMetricTokenV1(), "selectel_dbaas_postgresql_logical_replication_slot_v1": resourceDBaaSPostgreSQLLogicalReplicationSlotV1(), + "selectel_dbaas_kafka_acl_v1": resourceDBaaSKafkaACLV1(), + "selectel_dbaas_kafka_datastore_v1": resourceDBaaSKafkaDatastoreV1(), + "selectel_dbaas_kafka_topic_v1": resourceDBaaSKafkaTopicV1(), "selectel_craas_registry_v1": resourceCRaaSRegistryV1(), "selectel_craas_token_v1": resourceCRaaSTokenV1(), }, diff --git a/selectel/resource_selectel_dbaas_kafka_acl_v1.go b/selectel/resource_selectel_dbaas_kafka_acl_v1.go new file mode 100644 index 00000000..5a7160e5 --- /dev/null +++ b/selectel/resource_selectel_dbaas_kafka_acl_v1.go @@ -0,0 +1,215 @@ +package selectel + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/selectel/dbaas-go" +) + +func resourceDBaaSKafkaACLV1() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDBaaSACLV1Create, + ReadContext: resourceDBaaSACLV1Read, + 
UpdateContext: resourceDBaaSACLV1Update, + DeleteContext: resourceDBaaSACLV1Delete, + Importer: &schema.ResourceImporter{ + StateContext: resourceDBaaSACLV1ImportState, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "datastore_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "pattern": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "pattern_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "literal", + "prefixed", + "all", + }, false), + }, + "user_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "allow_read": { + Type: schema.TypeBool, + Required: true, + }, + "allow_write": { + Type: schema.TypeBool, + Required: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceDBaaSACLV1Create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + aclCreateOpts := dbaas.ACLCreateOpts{ + DatastoreID: d.Get("datastore_id").(string), + UserID: d.Get("user_id").(string), + Pattern: d.Get("pattern").(string), + PatternType: d.Get("pattern_type").(string), + AllowRead: d.Get("allow_read").(bool), + AllowWrite: d.Get("allow_write").(bool), + } + + log.Print(msgCreate(objectACL, aclCreateOpts)) + acl, err := dbaasClient.CreateACL(ctx, aclCreateOpts) + if err != nil { + return diag.FromErr(errCreatingObject(objectACL, err)) + } + + log.Printf("[DEBUG] waiting for acl %s to become 'ACTIVE'", acl.ID) + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForDBaaSACLV1ActiveState(ctx, dbaasClient, acl.ID, timeout) + if err != nil { + return diag.FromErr(errCreatingObject(objectACL, err)) + } + + d.SetId(acl.ID) + + return resourceDBaaSACLV1Read(ctx, d, meta) +} + +func resourceDBaaSACLV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + log.Print(msgGet(objectACL, d.Id())) + acl, err := dbaasClient.ACL(ctx, d.Id()) + if err != nil { + return diag.FromErr(errGettingObject(objectACL, d.Id(), err)) + } + d.Set("datastore_id", acl.DatastoreID) + if acl.Pattern != "" { + d.Set("pattern", acl.Pattern) + } + d.Set("pattern_type", acl.PatternType) + d.Set("allow_read", acl.AllowRead) + d.Set("allow_write", acl.AllowWrite) + d.Set("status", acl.Status) + + return nil +} + +func resourceDBaaSACLV1Update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + if d.HasChange("allow_read") || d.HasChange("allow_write") { + allowRead := d.Get("allow_read").(bool) + allowWrite := d.Get("allow_write").(bool) + updateOpts := dbaas.ACLUpdateOpts{ + AllowRead: allowRead, + AllowWrite: allowWrite, + } + + log.Print(msgUpdate(objectACL, d.Id(), updateOpts)) + _, err := dbaasClient.UpdateACL(ctx, d.Id(), updateOpts) + if err != nil { + return diag.FromErr(errUpdatingObject(objectACL, d.Id(), err)) + } + 
+ log.Printf("[DEBUG] waiting for acl %s to become 'ACTIVE'", d.Id()) + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForDBaaSACLV1ActiveState(ctx, dbaasClient, d.Id(), timeout) + if err != nil { + return diag.FromErr(errUpdatingObject(objectACL, d.Id(), err)) + } + } + + return resourceDBaaSACLV1Read(ctx, d, meta) +} + +func resourceDBaaSACLV1Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + log.Print(msgDelete(objectACL, d.Id())) + err := dbaasClient.DeleteACL(ctx, d.Id()) + if err != nil { + return diag.FromErr(errDeletingObject(objectACL, d.Id(), err)) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{strconv.Itoa(http.StatusOK)}, + Target: []string{strconv.Itoa(http.StatusNotFound)}, + Refresh: dbaasACLV1DeleteStateRefreshFunc(ctx, dbaasClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 20 * time.Second, + } + + log.Printf("[DEBUG] waiting for acl %s to become deleted", d.Id()) + _, err = stateConf.WaitForStateContext(ctx) + if err != nil { + return diag.FromErr(fmt.Errorf("error waiting for the acl %s to become deleted: %s", d.Id(), err)) + } + + return nil +} + +func resourceDBaaSACLV1ImportState(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if config.ProjectID == "" { + return nil, errors.New("SEL_PROJECT_ID must be set for the resource import") + } + if config.Region == "" { + return nil, errors.New("SEL_REGION must be set for the resource import") + } + + d.Set("project_id", config.ProjectID) + d.Set("region", config.Region) + + return []*schema.ResourceData{d}, nil +} diff --git a/selectel/resource_selectel_dbaas_kafka_acl_v1_test.go b/selectel/resource_selectel_dbaas_kafka_acl_v1_test.go new file mode 100644 index 00000000..77283e93 --- /dev/null +++ b/selectel/resource_selectel_dbaas_kafka_acl_v1_test.go @@ -0,0 +1,196 @@ +package selectel + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/selectel/dbaas-go" + "github.com/selectel/go-selvpcclient/v3/selvpcclient/resell/v2/projects" +) + +func TestAccDBaaSKafkaACLV1Basic(t *testing.T) { + var ( + dbaasACL dbaas.ACL + project projects.Project + ) + + projectName := acctest.RandomWithPrefix("tf-acc") + datastoreName := acctest.RandomWithPrefix("tf-acc-ds") + userName := RandomWithPrefix("tf_acc_user") + userPassword := acctest.RandomWithPrefix("tf-acc-pass") + nodeCount := 1 + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccSelectelPreCheck(t) }, + ProviderFactories: testAccProviders, + CheckDestroy: testAccCheckVPCV2ProjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDBaaSKafkaACLV1Basic(projectName, datastoreName, userName, userPassword, nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSKafkaACLV1Exists("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", &dbaasACL), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "pattern_type", "prefixed"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "pattern", "topic"), + 
resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "allow_read", "true"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "allow_write", "false"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "status", string(dbaas.StatusActive)), + ), + }, + { + Config: testAccDBaaSKafkaACLV1Update(projectName, datastoreName, userName, userPassword, nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSKafkaACLV1Exists("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", &dbaasACL), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "pattern_type", "prefixed"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "pattern", "topic"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "allow_read", "false"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "allow_write", "true"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_acl_v1.acl_tf_acc_test_1", "status", string(dbaas.StatusActive)), + ), + }, + }, + }) +} + +func testAccCheckDBaaSKafkaACLV1Exists(n string, dbaasACL *dbaas.ACL) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + ctx := context.Background() + + dbaasClient, err := newTestDBaaSClient(ctx, rs, testAccProvider) + if err != nil { + return err + } + + acl, err := dbaasClient.ACL(ctx, rs.Primary.ID) + if err != nil { + return err + } + + if acl.ID != rs.Primary.ID { + return errors.New("acl not found") + } + + *dbaasACL = acl + + return nil + } +} + +func testAccDBaaSKafkaACLV1Basic(projectName, datastoreName, userName, userPassword string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" "subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } +} + +resource "selectel_dbaas_user_v1" "user_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + datastore_id = "${selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1.id}" + name = "%s" + password = "%s" +} + +resource "selectel_dbaas_kafka_acl_v1" "acl_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + datastore_id = "${selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1.id}" + user_id = "${selectel_dbaas_user_v1.user_tf_acc_test_1.id}" + pattern = "topic" + pattern_type = "prefixed" + allow_read = true + allow_write = false +}`, projectName, datastoreName, nodeCount, userName, userPassword) +} + +func 
testAccDBaaSKafkaACLV1Update(projectName, datastoreName, userName, userPassword string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" "subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } +} + +resource "selectel_dbaas_user_v1" "user_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + datastore_id = "${selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1.id}" + name = "%s" + password = "%s" +} + +resource "selectel_dbaas_kafka_acl_v1" "acl_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + datastore_id = "${selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1.id}" + user_id = "${selectel_dbaas_user_v1.user_tf_acc_test_1.id}" + pattern = "topic" + pattern_type = "prefixed" + allow_read = false + allow_write = true +}`, projectName, datastoreName, nodeCount, userName, userPassword) +} diff --git a/selectel/resource_selectel_dbaas_kafka_datastore_v1.go b/selectel/resource_selectel_dbaas_kafka_datastore_v1.go new file mode 100644 index 00000000..2897a16a --- /dev/null +++ b/selectel/resource_selectel_dbaas_kafka_datastore_v1.go @@ -0,0 +1,307 @@ +package selectel + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/selectel/dbaas-go" +) + +func resourceDBaaSKafkaDatastoreV1() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDBaaSKafkaDatastoreV1Create, + ReadContext: resourceDBaaSKafkaDatastoreV1Read, + UpdateContext: resourceDBaaSKafkaDatastoreV1Update, + DeleteContext: resourceDBaaSKafkaDatastoreV1Delete, + Importer: &schema.ResourceImporter{ + StateContext: resourceDBaaSKafkaDatastoreV1ImportState, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "flavor_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"flavor"}, + }, + "node_count": { + Type: schema.TypeInt, + Required: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + 
"status": { + Type: schema.TypeString, + Computed: true, + }, + "connections": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "flavor": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ConflictsWith: []string{"flavor_id"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vcpus": { + Type: schema.TypeInt, + Required: true, + }, + "ram": { + Type: schema.TypeInt, + Required: true, + }, + "disk": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "firewall": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ips": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "config": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceDBaaSKafkaDatastoreV1Create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + flavorID, flavorIDOk := d.GetOk("flavor_id") + flavorRaw, flavorOk := d.GetOk("flavor") + + if !flavorIDOk && !flavorOk { + return diag.FromErr(errors.New("either 'flavor' or 'flavor_id' must be provided")) + } + + typeID := d.Get("type_id").(string) + diagErr = validateDatastoreType(ctx, []string{kafkaDatastoreType}, typeID, dbaasClient) + if diagErr != nil { + return diagErr + } + + datastoreCreateOpts := dbaas.DatastoreCreateOpts{ + Name: d.Get("name").(string), + TypeID: typeID, + SubnetID: d.Get("subnet_id").(string), + NodeCount: d.Get("node_count").(int), + Config: d.Get("config").(map[string]interface{}), + } + + if flavorOk { + flavorSet := flavorRaw.(*schema.Set) + flavor, err := resourceDBaaSDatastoreV1FlavorFromSet(flavorSet) + if err != nil { + return diag.FromErr(errParseDatastoreV1Flavor(err)) + } + + datastoreCreateOpts.Flavor = flavor + } + + if flavorIDOk { + datastoreCreateOpts.FlavorID = flavorID.(string) + } + + log.Print(msgCreate(objectDatastore, datastoreCreateOpts)) + datastore, err := dbaasClient.CreateDatastore(ctx, datastoreCreateOpts) + if err != nil { + return diag.FromErr(errCreatingObject(objectDatastore, err)) + } + + log.Printf("[DEBUG] waiting for datastore %s to become 'ACTIVE'", datastore.ID) + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForDBaaSDatastoreV1ActiveState(ctx, dbaasClient, datastore.ID, timeout) + if err != nil { + return diag.FromErr(errCreatingObject(objectDatastore, err)) + } + + d.SetId(datastore.ID) + + return resourceDBaaSKafkaDatastoreV1Read(ctx, d, meta) +} + +func resourceDBaaSKafkaDatastoreV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + log.Print(msgGet(objectDatastore, d.Id())) + datastore, err := dbaasClient.Datastore(ctx, d.Id()) + if err != nil { + return diag.FromErr(errGettingObject(objectDatastore, d.Id(), err)) + } + d.Set("name", datastore.Name) + d.Set("status", datastore.Status) + d.Set("project_id", datastore.ProjectID) + d.Set("subnet_id", datastore.SubnetID) + d.Set("type_id", datastore.TypeID) + d.Set("node_count", datastore.NodeCount) + d.Set("enabled", datastore.Enabled) + d.Set("flavor_id", datastore.FlavorID) + + flavor := resourceDBaaSDatastoreV1FlavorToSet(datastore.Flavor) + if err := d.Set("flavor", flavor); 
err != nil { + log.Print(errSettingComplexAttr("flavor", err)) + } + + if err := d.Set("connections", datastore.Connection); err != nil { + log.Print(errSettingComplexAttr("connections", err)) + } + + configMap := make(map[string]string) + for key, value := range datastore.Config { + configMap[key] = convertFieldToStringByType(value) + } + if err := d.Set("config", configMap); err != nil { + log.Print(errSettingComplexAttr("config", err)) + } + + return nil +} + +func resourceDBaaSKafkaDatastoreV1Update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + if d.HasChange("name") { + err := updateDatastoreName(ctx, d, dbaasClient) + if err != nil { + return diag.FromErr(err) + } + } + if d.HasChange("firewall") { + err := updateDatastoreFirewall(ctx, d, dbaasClient) + if err != nil { + return diag.FromErr(err) + } + } + if d.HasChange("node_count") || d.HasChange("flavor") || d.HasChange("flavor_id") { + err := resizeDatastore(ctx, d, dbaasClient) + if err != nil { + return diag.FromErr(err) + } + } + if d.HasChange("config") { + err := updateDatastoreConfig(ctx, d, dbaasClient) + if err != nil { + return diag.FromErr(err) + } + } + + return resourceDBaaSKafkaDatastoreV1Read(ctx, d, meta) +} + +func resourceDBaaSKafkaDatastoreV1Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + log.Print(msgDelete(objectDatastore, d.Id())) + err := dbaasClient.DeleteDatastore(ctx, d.Id()) + if err != nil { + return diag.FromErr(errDeletingObject(objectDatastore, d.Id(), err)) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{strconv.Itoa(http.StatusOK)}, + Target: []string{strconv.Itoa(http.StatusNotFound)}, + Refresh: dbaasDatastoreV1DeleteStateRefreshFunc(ctx, dbaasClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 15 * time.Second, + } + + log.Printf("[DEBUG] waiting for datastore %s to become deleted", d.Id()) + _, err = stateConf.WaitForStateContext(ctx) + if err != nil { + return diag.FromErr(fmt.Errorf("error waiting for the datastore %s to become deleted: %s", d.Id(), err)) + } + + return nil +} + +func resourceDBaaSKafkaDatastoreV1ImportState(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if config.ProjectID == "" { + return nil, errors.New("SEL_PROJECT_ID must be set for the resource import") + } + if config.Region == "" { + return nil, errors.New("SEL_REGION must be set for the resource import") + } + + d.Set("project_id", config.ProjectID) + d.Set("region", config.Region) + + return []*schema.ResourceData{d}, nil +} diff --git a/selectel/resource_selectel_dbaas_kafka_datastore_v1_test.go b/selectel/resource_selectel_dbaas_kafka_datastore_v1_test.go new file mode 100644 index 00000000..f0a73918 --- /dev/null +++ b/selectel/resource_selectel_dbaas_kafka_datastore_v1_test.go @@ -0,0 +1,203 @@ +package selectel + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/selectel/dbaas-go" + "github.com/selectel/go-selvpcclient/v3/selvpcclient/resell/v2/projects" +) + +func TestAccDBaaSKafkaDatastoreV1Basic(t *testing.T) { + var ( + dbaasDatastore dbaas.Datastore + project 
projects.Project + ) + + projectName := acctest.RandomWithPrefix("tf-acc") + datastoreName := acctest.RandomWithPrefix("tf-acc-ds") + nodeCount := 1 + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccSelectelPreCheck(t) }, + ProviderFactories: testAccProviders, + CheckDestroy: testAccCheckVPCV2ProjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDBaaSKafkaDatastoreV1Basic(projectName, datastoreName, nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSDatastoreV1Exists("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", &dbaasDatastore), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "name", datastoreName), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "region", "ru-3"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "node_count", strconv.Itoa(nodeCount)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "enabled", "true"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "status", string(dbaas.StatusActive)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.vcpus", strconv.Itoa(2)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.ram", strconv.Itoa(8192)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.disk", strconv.Itoa(32)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "config.log.retention.ms", "1000"), + resource.TestCheckResourceAttrSet("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "connections.master"), + resource.TestCheckResourceAttrSet("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "connections.MASTER"), + ), + }, + { + Config: testAccDBaaSKafkaDatastoreV1UpdateConfig(projectName, datastoreName, nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSDatastoreV1Exists("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", &dbaasDatastore), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "name", datastoreName), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "region", "ru-3"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "node_count", strconv.Itoa(nodeCount)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "enabled", "true"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "status", string(dbaas.StatusActive)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.vcpus", strconv.Itoa(2)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.ram", strconv.Itoa(8192)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.disk", strconv.Itoa(32)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "config.log.retention.ms", 
"10000"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "config.log.retention.bytes", "1024"), + resource.TestCheckResourceAttrSet("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "connections.master"), + resource.TestCheckResourceAttrSet("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "connections.MASTER"), + ), + }, + { + Config: testAccDBaaSKafkaDatastoreV1Resize(projectName, datastoreName, nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSDatastoreV1Exists("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", &dbaasDatastore), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "name", datastoreName), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "region", "ru-3"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "node_count", strconv.Itoa(nodeCount)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "enabled", "true"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "status", string(dbaas.StatusActive)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.vcpus", strconv.Itoa(2)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.ram", strconv.Itoa(8192)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "flavor.0.disk", strconv.Itoa(64)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "config.log.retention.ms", "1000"), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "config.log.retention.bytes", "1024"), + resource.TestCheckResourceAttrSet("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "connections.master"), + resource.TestCheckResourceAttrSet("selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1", "connections.MASTER"), + ), + }, + }, + }) +} + +func testAccDBaaSKafkaDatastoreV1Basic(projectName, datastoreName string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" "subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } + config = { + "log.retention.ms" = 1000 + } +}`, projectName, datastoreName, nodeCount) +} + +func testAccDBaaSKafkaDatastoreV1UpdateConfig(projectName, datastoreName string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" 
"subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } + config = { + "log.retention.ms" = 10000 + "log.retention.bytes" = 1024 + } +}`, projectName, datastoreName, nodeCount) +} + +func testAccDBaaSKafkaDatastoreV1Resize(projectName, datastoreName string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" "subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 64 + } + config = { + "log.retention.ms" = 10000 + "log.retention.bytes" = 1024 + } +}`, projectName, datastoreName, nodeCount) +} diff --git a/selectel/resource_selectel_dbaas_kafka_topic_v1.go b/selectel/resource_selectel_dbaas_kafka_topic_v1.go new file mode 100644 index 00000000..d37d7d95 --- /dev/null +++ b/selectel/resource_selectel_dbaas_kafka_topic_v1.go @@ -0,0 +1,186 @@ +package selectel + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/selectel/dbaas-go" +) + +func resourceDBaaSKafkaTopicV1() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDBaaSTopicV1Create, + ReadContext: resourceDBaaSTopicV1Read, + UpdateContext: resourceDBaaSTopicV1Update, + DeleteContext: resourceDBaaSTopicV1Delete, + Importer: &schema.ResourceImporter{ + StateContext: resourceDBaaSTopicV1ImportState, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "datastore_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "partitions": { + Type: schema.TypeInt, + Required: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func 
resourceDBaaSTopicV1Create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + topicCreateOpts := dbaas.TopicCreateOpts{ + DatastoreID: d.Get("datastore_id").(string), + Name: d.Get("name").(string), + Partitions: uint16(d.Get("partitions").(int)), + } + + log.Print(msgCreate(objectTopic, topicCreateOpts)) + topic, err := dbaasClient.CreateTopic(ctx, topicCreateOpts) + if err != nil { + return diag.FromErr(errCreatingObject(objectTopic, err)) + } + + log.Printf("[DEBUG] waiting for topic %s to become 'ACTIVE'", topic.ID) + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForDBaaSTopicV1ActiveState(ctx, dbaasClient, topic.ID, timeout) + if err != nil { + return diag.FromErr(errCreatingObject(objectTopic, err)) + } + + d.SetId(topic.ID) + + return resourceDBaaSTopicV1Read(ctx, d, meta) +} + +func resourceDBaaSTopicV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + log.Print(msgGet(objectTopic, d.Id())) + topic, err := dbaasClient.Topic(ctx, d.Id()) + if err != nil { + return diag.FromErr(errGettingObject(objectTopic, d.Id(), err)) + } + d.Set("datastore_id", topic.DatastoreID) + d.Set("name", topic.Name) + d.Set("partitions", topic.Partitions) + d.Set("status", topic.Status) + + return nil +} + +func resourceDBaaSTopicV1Update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + if d.HasChange("partitions") { + partitions := uint16(d.Get("partitions").(int)) + updateOpts := dbaas.TopicUpdateOpts{ + Partitions: partitions, + } + + log.Print(msgUpdate(objectTopic, d.Id(), updateOpts)) + _, err := dbaasClient.UpdateTopic(ctx, d.Id(), updateOpts) + if err != nil { + return diag.FromErr(errUpdatingObject(objectTopic, d.Id(), err)) + } + + log.Printf("[DEBUG] waiting for topic %s to become 'ACTIVE'", d.Id()) + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForDBaaSTopicV1ActiveState(ctx, dbaasClient, d.Id(), timeout) + if err != nil { + return diag.FromErr(errUpdatingObject(objectTopic, d.Id(), err)) + } + } + + return resourceDBaaSTopicV1Read(ctx, d, meta) +} + +func resourceDBaaSTopicV1Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + dbaasClient, diagErr := getDBaaSClient(d, meta) + if diagErr != nil { + return diagErr + } + + log.Print(msgDelete(objectTopic, d.Id())) + err := dbaasClient.DeleteTopic(ctx, d.Id()) + if err != nil { + return diag.FromErr(errDeletingObject(objectTopic, d.Id(), err)) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{strconv.Itoa(http.StatusOK)}, + Target: []string{strconv.Itoa(http.StatusNotFound)}, + Refresh: dbaasTopicV1DeleteStateRefreshFunc(ctx, dbaasClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 20 * time.Second, + } + + log.Printf("[DEBUG] waiting for topic %s to become deleted", d.Id()) + _, err = stateConf.WaitForStateContext(ctx) + if err != nil { + return diag.FromErr(fmt.Errorf("error waiting for the topic %s to become deleted: %s", d.Id(), err)) + } + + return nil +} + +func resourceDBaaSTopicV1ImportState(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if config.ProjectID 
== "" { + return nil, errors.New("SEL_PROJECT_ID must be set for the resource import") + } + if config.Region == "" { + return nil, errors.New("SEL_REGION must be set for the resource import") + } + + d.Set("project_id", config.ProjectID) + d.Set("region", config.Region) + + return []*schema.ResourceData{d}, nil +} diff --git a/selectel/resource_selectel_dbaas_kafka_topic_v1_test.go b/selectel/resource_selectel_dbaas_kafka_topic_v1_test.go new file mode 100644 index 00000000..b270a1df --- /dev/null +++ b/selectel/resource_selectel_dbaas_kafka_topic_v1_test.go @@ -0,0 +1,171 @@ +package selectel + +import ( + "context" + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/selectel/dbaas-go" + "github.com/selectel/go-selvpcclient/v3/selvpcclient/resell/v2/projects" +) + +func TestAccDBaaSKafkaTopicV1Basic(t *testing.T) { + var ( + dbaasTopic dbaas.Topic + project projects.Project + ) + + projectName := acctest.RandomWithPrefix("tf-acc") + datastoreName := acctest.RandomWithPrefix("tf-acc-ds") + topicName := RandomWithPrefix("tf_acc_topic") + topicPartitions := 1 + nodeCount := 1 + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccSelectelPreCheck(t) }, + ProviderFactories: testAccProviders, + CheckDestroy: testAccCheckVPCV2ProjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDBaaSKafkaTopicV1Basic(projectName, datastoreName, topicName, strconv.Itoa(topicPartitions), nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSKafkaTopicV1Exists("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", &dbaasTopic), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", "name", topicName), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", "partitions", strconv.Itoa(topicPartitions)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", "status", string(dbaas.StatusActive)), + ), + }, + { + Config: testAccDBaaSKafkaTopicV1Update(projectName, datastoreName, topicName, strconv.Itoa(topicPartitions+1), nodeCount), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCV2ProjectExists("selectel_vpc_project_v2.project_tf_acc_test_1", &project), + testAccCheckDBaaSKafkaTopicV1Exists("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", &dbaasTopic), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", "name", topicName), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", "partitions", strconv.Itoa(topicPartitions+1)), + resource.TestCheckResourceAttr("selectel_dbaas_kafka_topic_v1.topic_tf_acc_test_1", "status", string(dbaas.StatusActive)), + ), + }, + }, + }) +} + +func testAccCheckDBaaSKafkaTopicV1Exists(n string, dbaasTopic *dbaas.Topic) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + ctx := context.Background() + + dbaasClient, err := newTestDBaaSClient(ctx, rs, testAccProvider) + if err != nil { + return err + } + + topic, err := dbaasClient.Topic(ctx, rs.Primary.ID) + if err != nil { + return err + } + + if topic.ID != rs.Primary.ID { + return errors.New("topic not found") + } + + *dbaasTopic = 
topic + + return nil + } +} + +func testAccDBaaSKafkaTopicV1Basic(projectName, datastoreName, topicName, topicPartitions string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" "subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } +} + +resource "selectel_dbaas_kafka_topic_v1" "topic_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + datastore_id = "${selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1.id}" + name = "%s" + partitions = "%s" +}`, projectName, datastoreName, nodeCount, topicName, topicPartitions) +} + +func testAccDBaaSKafkaTopicV1Update(projectName, datastoreName, topicName, topicPartitions string, nodeCount int) string { + return fmt.Sprintf(` +resource "selectel_vpc_project_v2" "project_tf_acc_test_1" { + name = "%s" +} + +resource "selectel_vpc_subnet_v2" "subnet_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" +} + +data "selectel_dbaas_datastore_type_v1" "dt" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + filter { + engine = "kafka" + version = "3.5" + } +} + +resource "selectel_dbaas_kafka_datastore_v1" "datastore_tf_acc_test_1" { + name = "%s" + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + type_id = "${data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id}" + subnet_id = "${selectel_vpc_subnet_v2.subnet_tf_acc_test_1.subnet_id}" + node_count = "%d" + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } +} + +resource "selectel_dbaas_kafka_topic_v1" "topic_tf_acc_test_1" { + project_id = "${selectel_vpc_project_v2.project_tf_acc_test_1.id}" + region = "ru-3" + datastore_id = "${selectel_dbaas_kafka_datastore_v1.datastore_tf_acc_test_1.id}" + name = "%s" + partitions = "%s" +}`, projectName, datastoreName, nodeCount, topicName, topicPartitions) +} diff --git a/website/docs/r/dbaas_kafka_acl_v1.html.markdown b/website/docs/r/dbaas_kafka_acl_v1.html.markdown new file mode 100644 index 00000000..3299931b --- /dev/null +++ b/website/docs/r/dbaas_kafka_acl_v1.html.markdown @@ -0,0 +1,47 @@ +--- +layout: "selectel" +page_title: "Selectel: selectel_dbaas_kafka_acl_v1" +sidebar_current: "docs-selectel-resource-dbaas-kafka-acl-v1" +description: |- + Creates and manages an ACL in Selectel Managed Databases using public API v1. +--- + +# selectel\_dbaas\_kafka\_acl\_v1 + +Creates and manages an access control list (ACL) in a Kafka datastore using public API v1.
For more information about managing users in Kafka, see the [official Selectel documentation](https://docs.selectel.ru/cloud/managed-databases/kafka/manage-users/). + +## Example usage + +```hcl +resource "selectel_dbaas_kafka_acl_v1" "acl_1" { + project_id = selectel_vpc_project_v2.project_1.id + region = "ru-3" + datastore_id = selectel_dbaas_kafka_datastore_v1.datastore_1.id + user_id = selectel_dbaas_user_v1.user_1.id + pattern = "topic" + pattern_type = "prefixed" + allow_read = true + allow_write = true +} +``` + +## Argument Reference + +* `pattern` - (Optional) Name or prefix of a topic to which you provide access. Changing this creates a new ACL. Must be skipped when `pattern_type` is `all`. + +* `pattern_type` - (Required) ACL pattern type. Changing this creates a new ACL. Available ACL patterns are `prefixed`, `literal`, and `all`. When `pattern_type` is `all`, skip `pattern`. + +* `allow_read` - (Required) Allows connecting as a consumer. + +* `allow_write` - (Required) Allows connecting as a producer. + +* `project_id` - (Required) Unique identifier of the associated Cloud Platform project. Changing this creates a new ACL. Retrieved from the [selectel_vpc_project_v2](https://registry.terraform.io/providers/selectel/selectel/latest/docs/resources/vpc_project_v2) resource. Learn more about [Cloud Platform projects](https://docs.selectel.ru/cloud/managed-databases/about/projects/). + +* `region` - (Required) Pool where the database is located, for example, `ru-3`. Changing this creates a new ACL. Learn more about available pools in the [Availability matrix](https://docs.selectel.ru/control-panel-actions/availability-matrix/#managed-databases). + +* `datastore_id` - (Required) Unique identifier of the associated datastore. Changing this creates a new ACL. Retrieved from the [selectel_dbaas_kafka_datastore_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/resources/dbaas_kafka_datastore_v1) resource. + +* `user_id` - (Required) Unique identifier of the associated user. Changing this creates a new ACL. Retrieved from the [selectel_dbaas_user_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/resources/dbaas_user_v1) resource. + +## Attributes Reference + +* `status` - ACL status. diff --git a/website/docs/r/dbaas_kafka_datastore_v1.html.markdown b/website/docs/r/dbaas_kafka_datastore_v1.html.markdown new file mode 100644 index 00000000..85d8da3b --- /dev/null +++ b/website/docs/r/dbaas_kafka_datastore_v1.html.markdown @@ -0,0 +1,90 @@ +--- +layout: "selectel" +page_title: "Selectel: selectel_dbaas_kafka_datastore_v1" +sidebar_current: "docs-selectel-resource-dbaas-kafka-datastore-v1" +description: |- + Creates and manages a Kafka datastore in Selectel Managed Databases using public API v1. +--- + +# selectel\_dbaas\_kafka\_datastore\_v1 + +Creates and manages a Kafka datastore using public API v1. For more information about Managed Databases, see the [official Selectel documentation](https://docs.selectel.ru/cloud/managed-databases/kafka/). + +## Example usage + +```hcl +resource "selectel_dbaas_kafka_datastore_v1" "datastore_1" { + name = "datastore-1" + project_id = selectel_vpc_project_v2.project_1.id + region = "ru-3" + type_id = data.selectel_dbaas_datastore_type_v1.datastore_type_1.datastore_types[0].id + subnet_id = selectel_vpc_subnet_v2.subnet.subnet_id + node_count = 1 + flavor { + vcpus = 2 + ram = 8192 + disk = 32 + } +} +``` + +## Argument Reference + +* `name` - (Required) Datastore name.
+ +* `project_id` - (Required) Unique identifier of the associated Cloud Platform project. Changing this creates a new datastore. Retrieved from the [selectel_vpc_project_v2](https://registry.terraform.io/providers/selectel/selectel/latest/docs/resources/vpc_project_v2) resource. Learn more about [Cloud Platform projects](https://docs.selectel.ru/cloud/managed-databases/about/projects/). + +* `region` - (Required) Pool where the database is located, for example, `ru-3`. Changing this creates a new datastore. Learn more about available pools in the [Availability matrix](https://docs.selectel.ru/control-panel-actions/availability-matrix/#managed-databases). + +* `subnet_id` - (Required) Unique identifier of the associated OpenStack network. Changing this creates a new datastore. Learn more about the [openstack_networking_network_v2](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/data-sources/networking_network_v2) resource in the official OpenStack documentation. + +* `type_id` - (Required) Unique identifier of the datastore type. Changing this creates a new datastore. Retrieved from the [selectel_dbaas_datastore_type_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/data-sources/dbaas_datastore_type_v1) data source. + +* `node_count` - (Required) Number of replicas in the datastore. The only available value is 1. Learn more about [Replication](https://docs.selectel.ru/cloud/managed-databases/about/about-managed-databases/#отказоустойчивость-и-репликация). + +* `flavor_id` - (Optional) Unique identifier of the flavor for the datastore. Can be skipped when `flavor` is set. You can retrieve information about available flavors with the [selectel_dbaas_flavor_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/data-sources/dbaas_flavor_v1) data source. + +* `flavor` - (Optional) Flavor configuration for the datastore. You can retrieve information about available flavors with the [selectel_dbaas_flavor_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/data-sources/dbaas_flavor_v1) data source. Learn more about available configurations for [Kafka](https://docs.selectel.ru/cloud/managed-databases/kafka/configurations/). + + * `vcpus` - (Required) Number of vCPU cores. + + * `ram` - (Required) Amount of RAM in MB. + + * `disk` - (Required) Volume size in GB. + +* `firewall` - (Optional) List of IP-addresses with access to the datastore. + +* `config` - (Optional) Configuration parameters for the datastore. You can retrieve information about available configuration parameters with the [selectel_dbaas_configuration_parameter_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/data-sources/dbaas_configuration_parameter_v1) data source. + +## Attributes Reference + +* `status` - Datastore status. + +* `connections` - DNS addresses to connect to the datastore. + +## Import + +You can import a datastore: + +```shell +export OS_DOMAIN_NAME= +export OS_USERNAME= +export OS_PASSWORD= +export SEL_PROJECT_ID= +export SEL_REGION= +terraform import selectel_dbaas_kafka_datastore_v1.datastore_1 +``` + +where: + +* `` — Selectel account ID. The account ID is in the top right corner of the [Control panel](https://my.selectel.ru/). Learn more about [Registration](https://docs.selectel.ru/control-panel-actions/account/registration/). + +* `` — Name of the service user. 
To get the name, in the top right corner of the [Control panel](https://my.selectel.ru/profile/users_management/users?type=service), go to the account menu ⟶ **Profile and Settings** ⟶ **User management** ⟶ the **Service users** tab ⟶ copy the name of the required user. Learn more about [Service users](https://docs.selectel.ru/control-panel-actions/users-and-roles/user-types-and-roles/). + +* `` — Password of the service user. + +* `` — Unique identifier of the associated Cloud Platform project. To get the project ID, in the [Control panel](https://my.selectel.ru/vpc/), go to **Cloud Platform** ⟶ project name ⟶ copy the ID of the required project. Learn more about [Cloud Platform projects](https://docs.selectel.ru/cloud/managed-databases/about/projects/). + +* `` — Pool where the cluster is located, for example, `ru-3`. To get information about the pool, in the [Control panel](https://my.selectel.ru/vpc/dbaas/), go to **Cloud Platform** ⟶ **Managed Databases**. The pool is in the **Pool** column. + +* `` — Unique identifier of the datastore, for example, `b311ce58-2658-46b5-b733-7a0f418703f2`. To get the datastore ID, in the [Control panel](https://my.selectel.ru/vpc/dbaas/), go to **Cloud Platform** ⟶ **Managed Databases** ⟶ copy the ID under the cluster name. \ No newline at end of file diff --git a/website/docs/r/dbaas_kafka_topic_v1.html.markdown b/website/docs/r/dbaas_kafka_topic_v1.html.markdown new file mode 100644 index 00000000..2e484848 --- /dev/null +++ b/website/docs/r/dbaas_kafka_topic_v1.html.markdown @@ -0,0 +1,66 @@ +--- +layout: "selectel" +page_title: "Selectel: selectel_dbaas_kafka_topic_v1" +sidebar_current: "docs-selectel-resource-dbaas-kafka-topic-v1" +description: |- + Creates and manages a topic in Selectel Managed Databases using public API v1. +--- + +# selectel\_dbaas\_kafka\_topic\_v1 + +Creates and manages a topic in a Kafka datastore using public API v1. For more information about managing topics in Kafka, see the [official Selectel documentation](https://docs.selectel.ru/cloud/managed-databases/kafka/manage-topics/). + +## Example usage + +```hcl +resource "selectel_dbaas_kafka_topic_v1" "topic_1" { + project_id = selectel_vpc_project_v2.project_1.id + region = "ru-3" + datastore_id = selectel_dbaas_kafka_datastore_v1.datastore_1.id + name = "topic" + partitions = 1 +} +``` + +## Argument Reference + +* `name` - (Required) Topic name. Changing this creates a new topic. + +* `partitions` - (Required) Number of partitions in a topic. The available range is from 1 to 4000. You cannot decrease the number of partitions in an existing topic. Learn more about [Partitions](https://docs.selectel.ru/cloud/managed-databases/kafka/manage-topics/#partitions). + +* `project_id` - (Required) Unique identifier of the associated Cloud Platform project. Changing this creates a new topic. Retrieved from the [selectel_vpc_project_v2](https://registry.terraform.io/providers/selectel/selectel/latest/docs/resources/vpc_project_v2) resource. Learn more about [Cloud Platform projects](https://docs.selectel.ru/cloud/managed-databases/about/projects/). + +* `region` - (Required) Pool where the database is located, for example, `ru-3`. Changing this creates a new topic. Learn more about available pools in the [Availability matrix](https://docs.selectel.ru/control-panel-actions/availability-matrix/#managed-databases). + +* `datastore_id` - (Required) Unique identifier of the associated datastore. Changing this creates a new topic.
Retrieved from the [selectel_dbaas_kafka_datastore_v1](https://registry.terraform.io/providers/selectel/selectel/latest/docs/resources/dbaas_kafka_datastore_v1) resource. + +## Attributes Reference + +* `status` - Topic status. + +## Import + +You can import a topic: + +```shell +export OS_DOMAIN_NAME= +export OS_USERNAME= +export OS_PASSWORD= +export SEL_PROJECT_ID= +export SEL_REGION= +terraform import selectel_dbaas_kafka_topic_v1.topic_1 +``` + +where: + +* `` — Selectel account ID. The account ID is in the top right corner of the [Control panel](https://my.selectel.ru/). Learn more about [Registration](https://docs.selectel.ru/control-panel-actions/account/registration/). + +* `` — Name of the service user. To get the name, in the top right corner of the [Control panel](https://my.selectel.ru/profile/users_management/users?type=service), go to the account menu ⟶ **Profile and Settings** ⟶ **User management** ⟶ the **Service users** tab ⟶ copy the name of the required user. Learn more about [Service users](https://docs.selectel.ru/control-panel-actions/users-and-roles/user-types-and-roles/). + +* `` — Password of the service user. + +* `` — Unique identifier of the associated Cloud Platform project. To get the project ID, in the [Control panel](https://my.selectel.ru/vpc/), go to **Cloud Platform** ⟶ project name ⟶ copy the ID of the required project. Learn more about [Cloud Platform projects](https://docs.selectel.ru/cloud/managed-databases/about/projects/). + +* `` — Pool where the cluster is located, for example, `ru-3`. To get information about the pool, in the [Control panel](https://my.selectel.ru/vpc/dbaas/), go to **Cloud Platform** ⟶ **Managed Databases**. The pool is in the **Pool** column. + +* `` — Unique identifier of the topic, for example, `b311ce58-2658-46b5-b733-7a0f418703f2`. To get the topic ID, in the [Control panel](https://my.selectel.ru/vpc/dbaas/), go to **Cloud Platform** ⟶ **Managed Databases** ⟶ the cluster page ⟶ the **Topics** tab. The topic ID is under the topic name. 
\ No newline at end of file diff --git a/website/docs/r/dbaas_redis_datastore_v1.html.markdown b/website/docs/r/dbaas_redis_datastore_v1.html.markdown index 9bc4a2e5..eab575b3 100644 --- a/website/docs/r/dbaas_redis_datastore_v1.html.markdown +++ b/website/docs/r/dbaas_redis_datastore_v1.html.markdown @@ -17,7 +17,7 @@ resource "selectel_dbaas_redis_datastore_v1" "datastore_1" { name = "datastore-1" project_id = selectel_vpc_project_v2.project_1.id region = "ru-3" - type_id = data.selectel_dbaas_datastore_type_v1.dt.datastore_types[0].id + type_id = data.selectel_dbaas_datastore_type_v1.datastore_type_1.datastore_types[0].id subnet_id = selectel_vpc_subnet_v2.subnet.subnet_id node_count = 3 flavor_id = data.selectel_dbaas_flavor_v1.flavor.flavors[0].id diff --git a/website/selectel.erb b/website/selectel.erb index f6bb4f35..702c0411 100644 --- a/website/selectel.erb +++ b/website/selectel.erb @@ -104,19 +104,19 @@ selectel_dbaas_database_v1 > - selectel_dbaas_database_v1 + selectel_dbaas_mysql_database_v1 > - selectel_dbaas_database_v1 + selectel_dbaas_postgresql_database_v1 > - selectel_dbaas_datastore_v1 + selectel_dbaas_mysql_datastore_v1 > - selectel_dbaas_datastore_v1 + selectel_dbaas_postgresql_datastore_v1 > - selectel_dbaas_datastore_v1 + selectel_dbaas_redis_datastore_v1 > selectel_dbaas_datastore_v1 @@ -125,7 +125,7 @@ selectel_dbaas_extension_v1 > - selectel_dbaas_extension_v1 + selectel_dbaas_postgresql_extension_v1 > selectel_dbaas_grant_v1 @@ -139,6 +139,15 @@ > selectel_dbaas_logical_replication_slot_v1 + > + selectel_dbaas_kafka_acl_v1 + + > + selectel_dbaas_kafka_datastore_v1 + + > + selectel_dbaas_kafka_topic_v1 +
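Taken together, the new Kafka resources layer on top of each other: a `selectel_dbaas_kafka_datastore_v1` cluster, a `selectel_dbaas_user_v1` user and `selectel_dbaas_kafka_topic_v1` topics inside it, and a `selectel_dbaas_kafka_acl_v1` that binds a user to a topic pattern. The following is a minimal combined sketch assembled from the doc examples and acceptance-test configs in this change; the project, subnet, and user names, the password placeholder, and the `3.5` engine version filter are illustrative values, not fixed requirements.

```hcl
# Illustrative end-to-end example of the resources added in this change.
resource "selectel_vpc_project_v2" "project_1" {
  name = "kafka-project"
}

resource "selectel_vpc_subnet_v2" "subnet" {
  project_id = selectel_vpc_project_v2.project_1.id
  region     = "ru-3"
}

# Pick a Kafka datastore type available in the pool (version is an example).
data "selectel_dbaas_datastore_type_v1" "kafka" {
  project_id = selectel_vpc_project_v2.project_1.id
  region     = "ru-3"
  filter {
    engine  = "kafka"
    version = "3.5"
  }
}

resource "selectel_dbaas_kafka_datastore_v1" "datastore_1" {
  name       = "datastore-1"
  project_id = selectel_vpc_project_v2.project_1.id
  region     = "ru-3"
  type_id    = data.selectel_dbaas_datastore_type_v1.kafka.datastore_types[0].id
  subnet_id  = selectel_vpc_subnet_v2.subnet.subnet_id
  node_count = 1
  flavor {
    vcpus = 2
    ram   = 8192
    disk  = 32
  }
}

# A Kafka user that the ACL below refers to; password is a placeholder.
resource "selectel_dbaas_user_v1" "user_1" {
  project_id   = selectel_vpc_project_v2.project_1.id
  region       = "ru-3"
  datastore_id = selectel_dbaas_kafka_datastore_v1.datastore_1.id
  name         = "kafka_user"
  password     = "change-me-please"
}

resource "selectel_dbaas_kafka_topic_v1" "topic_1" {
  project_id   = selectel_vpc_project_v2.project_1.id
  region       = "ru-3"
  datastore_id = selectel_dbaas_kafka_datastore_v1.datastore_1.id
  name         = "topic"
  partitions   = 1
}

# Grant the user read and write access to all topics prefixed with "topic".
resource "selectel_dbaas_kafka_acl_v1" "acl_1" {
  project_id   = selectel_vpc_project_v2.project_1.id
  region       = "ru-3"
  datastore_id = selectel_dbaas_kafka_datastore_v1.datastore_1.id
  user_id      = selectel_dbaas_user_v1.user_1.id
  pattern      = "topic"
  pattern_type = "prefixed"
  allow_read   = true
  allow_write  = true
}
```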