From 8ff587954c51e9a9afb9a720803ed125e4a50d3d Mon Sep 17 00:00:00 2001
From: wuzhuanhong
Date: Sun, 7 Apr 2024 18:11:41 +0800
Subject: [PATCH] feat(dli): add new datasource to get list of elastic
 resource pools (#4532)

---
 .../dli_elastic_resource_pools.md             |  78 ++++++
 huaweicloud/provider.go                       |   1 +
 ...eicloud_dli_elastic_resource_pools_test.go | 181 +++++++++++++
 ..._huaweicloud_dli_elastic_resource_pools.go | 240 ++++++++++++++++++
 4 files changed, 500 insertions(+)
 create mode 100644 docs/data-sources/dli_elastic_resource_pools.md
 create mode 100644 huaweicloud/services/acceptance/dli/data_source_huaweicloud_dli_elastic_resource_pools_test.go
 create mode 100644 huaweicloud/services/dli/data_source_huaweicloud_dli_elastic_resource_pools.go

diff --git a/docs/data-sources/dli_elastic_resource_pools.md b/docs/data-sources/dli_elastic_resource_pools.md
new file mode 100644
index 0000000000..b7f09d5c02
--- /dev/null
+++ b/docs/data-sources/dli_elastic_resource_pools.md
@@ -0,0 +1,78 @@
+---
+subcategory: "Data Lake Insight (DLI)"
+---
+
+# huaweicloud_dli_elastic_resource_pools
+
+Use this data source to get the list of DLI elastic resource pools within HuaweiCloud.
+
+## Example Usage
+
+```hcl
+variable "resource_pool_name" {}
+
+data "huaweicloud_dli_elastic_resource_pools" "test" {
+  name = var.resource_pool_name
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `region` - (Optional, String) Specifies the region in which to query the resource.
+  If omitted, the provider-level region will be used.
+
+* `name` - (Optional, String) Specifies the name of the elastic resource pool.
+
+* `status` - (Optional, String) Specifies the status of the elastic resource pool.
+  The valid values are as follows:
+  + **available**
+  + **failed**
+
+* `tags` - (Optional, Map) Specifies the key/value pairs used to filter the elastic resource pools.
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The data source ID.
+
+* `elastic_resource_pools` - All elastic resource pools that match the filter parameters.
+  The [elastic_resource_pools](#elastic_resource_pools_struct) structure is documented below.
+
+<a name="elastic_resource_pools_struct"></a>
+The `elastic_resource_pools` block supports:
+
+* `id` - The elastic resource pool ID.
+
+* `name` - The elastic resource pool name.
+
+* `max_cu` - The maximum number of CUs of the elastic resource pool.
+
+* `min_cu` - The minimum number of CUs of the elastic resource pool.
+
+* `current_cu` - The current number of CUs of the elastic resource pool.
+
+* `actual_cu` - The actual number of CUs of the elastic resource pool.
+
+* `cidr` - The CIDR block of the network associated with the elastic resource pool.
+
+* `resource_id` - The resource ID of the elastic resource pool.
+
+* `enterprise_project_id` - The enterprise project ID corresponding to the elastic resource pool.
+
+* `queues` - The list of queues associated with the elastic resource pool.
+
+* `description` - The description of the elastic resource pool.
+
+* `status` - The current status of the elastic resource pool.
+
+* `owner` - The name of the account that created the elastic resource pool.
+
+* `manager` - The type of the elastic resource pool.
+
+* `fail_reason` - The reason why the elastic resource pool failed to be created.
+
+* `created_at` - The creation time of the elastic resource pool.
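For a broader illustration of the `status` and `tags` filters described above, a minimal sketch along these lines could be used (only arguments and attributes documented in this page are referenced; the tag key/value and the `available` data source label are placeholders):

```hcl
# Query only the pools that are in the "available" state and carry a given tag.
data "huaweicloud_dli_elastic_resource_pools" "available" {
  status = "available"

  tags = {
    foo = "bar"
  }
}

# Expose the names of the matched pools.
output "available_pool_names" {
  value = data.huaweicloud_dli_elastic_resource_pools.available.elastic_resource_pools[*].name
}
```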
diff --git a/huaweicloud/provider.go b/huaweicloud/provider.go
index 57741bc259..71ced1f021 100644
--- a/huaweicloud/provider.go
+++ b/huaweicloud/provider.go
@@ -485,6 +485,7 @@ func Provider() *schema.Provider {
 
 			"huaweicloud_dli_datasource_auths":       dli.DataSourceAuths(),
 			"huaweicloud_dli_datasource_connections": dli.DataSourceConnections(),
+			"huaweicloud_dli_elastic_resource_pools": dli.DataSourceDliElasticPools(),
 			"huaweicloud_dli_quotas":                 dli.DataSourceDliQuotas(),
 
 			"huaweicloud_dms_kafka_flavors": dms.DataSourceKafkaFlavors(),

diff --git a/huaweicloud/services/acceptance/dli/data_source_huaweicloud_dli_elastic_resource_pools_test.go b/huaweicloud/services/acceptance/dli/data_source_huaweicloud_dli_elastic_resource_pools_test.go
new file mode 100644
index 0000000000..b41a43adcd
--- /dev/null
+++ b/huaweicloud/services/acceptance/dli/data_source_huaweicloud_dli_elastic_resource_pools_test.go
@@ -0,0 +1,181 @@
+package dli
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+
+	"github.com/huaweicloud/terraform-provider-huaweicloud/huaweicloud/services/acceptance"
+)
+
+func TestAccDataSourceDliElasticResourcePools_basic(t *testing.T) {
+	var (
+		dataSource = "data.huaweicloud_dli_elastic_resource_pools.test"
+		rName      = acceptance.RandomAccResourceName()
+		dc         = acceptance.InitDataSourceCheck(dataSource)
+
+		byName   = "data.huaweicloud_dli_elastic_resource_pools.filter_by_name"
+		dcByName = acceptance.InitDataSourceCheck(byName)
+
+		byNameNotFound   = "data.huaweicloud_dli_elastic_resource_pools.filter_by_name_not_found"
+		dcByNameNotFound = acceptance.InitDataSourceCheck(byNameNotFound)
+
+		byStatus   = "data.huaweicloud_dli_elastic_resource_pools.filter_by_status"
+		dcByStatus = acceptance.InitDataSourceCheck(byStatus)
+
+		byTags   = "data.huaweicloud_dli_elastic_resource_pools.filter_by_tags"
+		dcByTags = acceptance.InitDataSourceCheck(byTags)
+	)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck: func() {
+			acceptance.TestAccPreCheck(t)
+		},
+		ProviderFactories: acceptance.TestAccProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: testDataSourceDataSourceDliElasticPools_basic(rName),
+				Check: resource.ComposeTestCheckFunc(
+					dc.CheckResourceExists(),
+
+					dcByName.CheckResourceExists(),
+					resource.TestCheckOutput("is_name_filter_useful", "true"),
+					resource.TestCheckResourceAttrSet(byName, "elastic_resource_pools.0.id"),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.name", rName),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.max_cu", "64"),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.min_cu", "64"),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.current_cu", "64"),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.cidr", "172.16.0.0/12"),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.enterprise_project_id", "0"),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.queues.0", rName),
+					resource.TestCheckResourceAttr(byName, "elastic_resource_pools.0.description", "Created by terraform script"),
+					resource.TestCheckResourceAttrSet(byName, "elastic_resource_pools.0.resource_id"),
+					resource.TestCheckResourceAttrSet(byName, "elastic_resource_pools.0.owner"),
+					resource.TestCheckResourceAttrSet(byName, "elastic_resource_pools.0.manager"),
+					resource.TestCheckResourceAttrSet(byName, "elastic_resource_pools.0.created_at"),
+
+					dcByNameNotFound.CheckResourceExists(),
resource.TestCheckOutput("is_name_filter_useful_not_found", "true"), + + dcByStatus.CheckResourceExists(), + resource.TestCheckOutput("is_name_filter_useful_not_found", "true"), + + dcByStatus.CheckResourceExists(), + resource.TestCheckOutput("is_status_filter_useful", "true"), + + dcByTags.CheckResourceExists(), + resource.TestCheckOutput("is_tags_filter_useful", "true"), + + waitForDeletionCooldownComplete(), + ), + }, + }, + }) +} + +func testDataSourceDataSourceDliElasticPools_basic(name string) string { + return fmt.Sprintf(` +locals { + tags = { + foo = "bar" + terraform = "elastic_resource_pool" + } +} + +resource "huaweicloud_dli_elastic_resource_pool" "test" { + name = "%[1]s" + max_cu = 64 + min_cu = 64 + enterprise_project_id = "0" + description = "Created by terraform script" + tags = local.tags +} + +resource "huaweicloud_dli_queue" "test" { + depends_on = [ + huaweicloud_dli_elastic_resource_pool.test, + ] + + name = "%[1]s" + cu_count = 16 + resource_mode = 1 + elastic_resource_pool_name = huaweicloud_dli_elastic_resource_pool.test.name +} + +data "huaweicloud_dli_elastic_resource_pools" "test" { + depends_on = [ + huaweicloud_dli_elastic_resource_pool.test, + huaweicloud_dli_queue.test + ] +} + +data "huaweicloud_dli_elastic_resource_pools" "filter_by_name" { + depends_on = [ + huaweicloud_dli_elastic_resource_pool.test, + huaweicloud_dli_queue.test + ] + + name = "%[1]s" +} + +locals { + name_filter_result = [ + for v in data.huaweicloud_dli_elastic_resource_pools.filter_by_name.elastic_resource_pools[*].name : v == "%[1]s" + ] +} + +output "is_name_filter_useful" { + value = length(local.name_filter_result) == 1 && alltrue(local.name_filter_result) +} + +data "huaweicloud_dli_elastic_resource_pools" "filter_by_name_not_found" { + depends_on = [ + huaweicloud_dli_elastic_resource_pool.test, + huaweicloud_dli_queue.test + ] + + name = "not_found" +} + +output "is_name_filter_useful_not_found" { + value = length(data.huaweicloud_dli_elastic_resource_pools.filter_by_name_not_found.elastic_resource_pools) == 0 +} + +locals { + status = huaweicloud_dli_elastic_resource_pool.test.status +} + +data "huaweicloud_dli_elastic_resource_pools" "filter_by_status" { + depends_on = [ + huaweicloud_dli_elastic_resource_pool.test, + huaweicloud_dli_queue.test + ] + + status = local.status +} + +locals { + status_filter_result = [ + for v in data.huaweicloud_dli_elastic_resource_pools.filter_by_status.elastic_resource_pools[*].status : v == local.status + ] +} + +output "is_status_filter_useful" { + value = length(local.status_filter_result) > 0 && alltrue(local.status_filter_result) +} + +data "huaweicloud_dli_elastic_resource_pools" "filter_by_tags" { + depends_on = [ + huaweicloud_dli_elastic_resource_pool.test, + huaweicloud_dli_queue.test + ] + + tags = local.tags +} + +output "is_tags_filter_useful" { + value = length(data.huaweicloud_dli_elastic_resource_pools.filter_by_tags) >= 1 +} +`, name) +} diff --git a/huaweicloud/services/dli/data_source_huaweicloud_dli_elastic_resource_pools.go b/huaweicloud/services/dli/data_source_huaweicloud_dli_elastic_resource_pools.go new file mode 100644 index 0000000000..7185520c9e --- /dev/null +++ b/huaweicloud/services/dli/data_source_huaweicloud_dli_elastic_resource_pools.go @@ -0,0 +1,240 @@ +// Generated by PMS #67 +package dli + +import ( + "context" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/tidwall/gjson" + + "github.com/huaweicloud/terraform-provider-huaweicloud/huaweicloud/config" + "github.com/huaweicloud/terraform-provider-huaweicloud/huaweicloud/helper/httphelper" + "github.com/huaweicloud/terraform-provider-huaweicloud/huaweicloud/helper/schemas" + "github.com/huaweicloud/terraform-provider-huaweicloud/huaweicloud/utils" +) + +func DataSourceDliElasticPools() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceDliElasticPoolsRead, + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Specifies the region in which to query the resource. If omitted, the provider-level region will be used.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the name of the elastic resource pool.`, + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the status of the elastic resource pool.`, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Specifies the key/value pairs to associate with the elastic resource pool.`, + }, + "elastic_resource_pools": { + Type: schema.TypeSet, + Computed: true, + Description: `All elastic resource pools that match the filter parameters.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + Description: `The elastic resource pool ID.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The elastic resource pool name.`, + }, + "max_cu": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum CUs number of the elastic resource pool.`, + }, + "min_cu": { + Type: schema.TypeInt, + Computed: true, + Description: `The minimum CUs number of the elastic resource pool.`, + }, + "current_cu": { + Type: schema.TypeInt, + Computed: true, + Description: `The current CUs number of the elastic resource pool.`, + }, + "actual_cu": { + Type: schema.TypeInt, + Computed: true, + Description: `The actual CUs number of the elastic resource pool.`, + }, + "cidr": { + Type: schema.TypeString, + Computed: true, + Description: `The CIDR block of network to associate with the elastic resource pool.`, + }, + "resource_id": { + Type: schema.TypeString, + Computed: true, + Description: `The resource ID of the elastic resource pool.`, + }, + "enterprise_project_id": { + Type: schema.TypeString, + Computed: true, + Description: `The enterprise project ID corresponding to the elastic resource pool.`, + }, + "queues": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of queues association with the elastic resource pool.`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: `The description of the elastic resource pool.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `The current status of the elastic resource pool.`, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + Description: `The account name for creating elastic resource pool.`, + }, + "manager": { + Type: schema.TypeString, + Computed: true, + Description: `The type of the elastic resource pool.`, + }, + "fail_reason": { + Type: schema.TypeString, + Computed: true, + Description: `The reason of elastic resource pool creation failed.`, + }, + "created_at": { + Type: 
schema.TypeString, + Computed: true, + Description: `The creation time of the elastic resource pool.`, + }, + }, + }, + }, + }, + } +} + +type ElasticPoolsDSWrapper struct { + *schemas.ResourceDataWrapper + Config *config.Config +} + +func newElasticPoolsDSWrapper(d *schema.ResourceData, meta interface{}) *ElasticPoolsDSWrapper { + return &ElasticPoolsDSWrapper{ + ResourceDataWrapper: schemas.NewSchemaWrapper(d), + Config: meta.(*config.Config), + } +} + +func dataSourceDliElasticPoolsRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + wrapper := newElasticPoolsDSWrapper(d, meta) + lisElaResPooRst, err := wrapper.ListElasticResourcePools() + if err != nil { + return diag.FromErr(err) + } + + id, _ := uuid.GenerateUUID() + d.SetId(id) + + err = wrapper.listElasticResourcePoolsToSchema(lisElaResPooRst) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +// @API DLI GET /v3/{project_id}/elastic-resource-pools +func (w *ElasticPoolsDSWrapper) ListElasticResourcePools() (*gjson.Result, error) { + client, err := w.NewClient(w.Config, "dli") + if err != nil { + return nil, err + } + + uri := "/v3/{project_id}/elastic-resource-pools" + params := map[string]any{ + "name": w.Get("name"), + "status": w.Get("status"), + "tags": w.getTags(), + } + params = utils.RemoveNil(params) + return httphelper.New(client). + Method("GET"). + URI(uri). + Query(params). + OffsetPager("elastic_resource_pools", "offset", "limit", 100). + Request(). + Result() +} + +func (w *ElasticPoolsDSWrapper) listElasticResourcePoolsToSchema(body *gjson.Result) error { + d := w.ResourceData + mErr := multierror.Append(nil, + d.Set("region", w.Config.GetRegion(w.ResourceData)), + d.Set("elastic_resource_pools", schemas.SliceToList(body.Get("elastic_resource_pools"), + func(elaResPoo gjson.Result) any { + return map[string]any{ + "id": elaResPoo.Get("id").Value(), + "name": elaResPoo.Get("elastic_resource_pool_name").Value(), + "max_cu": elaResPoo.Get("max_cu").Value(), + "min_cu": elaResPoo.Get("min_cu").Value(), + "current_cu": elaResPoo.Get("current_cu").Value(), + "actual_cu": elaResPoo.Get("actual_cu").Value(), + "cidr": elaResPoo.Get("cidr_in_vpc").Value(), + "resource_id": elaResPoo.Get("resource_id").Value(), + "enterprise_project_id": elaResPoo.Get("enterprise_project_id").Value(), + "queues": schemas.SliceToStrList(elaResPoo.Get("queues")), + "description": elaResPoo.Get("description").Value(), + "status": elaResPoo.Get("status").Value(), + "owner": elaResPoo.Get("owner").Value(), + "manager": elaResPoo.Get("manager").Value(), + "fail_reason": elaResPoo.Get("fail_reason").Value(), + "created_at": w.setElaResPooCreTim(body, &elaResPoo), + } + }, + )), + ) + return mErr.ErrorOrNil() +} + +func (w *ElasticPoolsDSWrapper) getTags() string { + raw := w.Get("tags") + if raw == nil { + return "" + } + + tags := raw.(map[string]interface{}) + tagsList := make([]string, 0, len(tags)) + for k, v := range tags { + tagsList = append(tagsList, k+"="+v.(string)) + } + return strings.Join(tagsList, ",") +} + +func (*ElasticPoolsDSWrapper) setElaResPooCreTim(_, data *gjson.Result) string { + return utils.FormatTimeStampRFC3339(data.Get("create_time").Int()/1000, false) +}
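The flatten logic above maps the API fields `elastic_resource_pool_name` and `cidr_in_vpc` to the `name` and `cidr` attributes, and converts the numeric `create_time` (divided by 1000) into an RFC3339 `created_at` string. A minimal sketch of consuming the resulting nested attributes from a configuration (the data source label `all`, the local, and the output names are illustrative; only fields exported by this data source are referenced):

```hcl
data "huaweicloud_dli_elastic_resource_pools" "all" {}

locals {
  # Map each pool name to the list of queues bound to that pool.
  queues_by_pool = {
    for pool in data.huaweicloud_dli_elastic_resource_pools.all.elastic_resource_pools :
    pool.name => pool.queues
  }
}

output "failed_pools" {
  # fail_reason is only meaningful for pools whose creation failed.
  value = [
    for pool in data.huaweicloud_dli_elastic_resource_pools.all.elastic_resource_pools :
    { name = pool.name, reason = pool.fail_reason }
    if pool.status == "failed"
  ]
}
```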