resource/aws_rds_global_cluster: Add force_destroy and source_db_cluster_identifier arguments, add global_cluster_members attribute #14487

Merged
151 changes: 144 additions & 7 deletions aws/resource_aws_rds_global_cluster.go
@@ -12,6 +12,10 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

const (
rdsGlobalClusterRemovalTimeout = 2 * time.Minute
)

func resourceAwsRDSGlobalCluster() *schema.Resource {
return &schema.Resource{
Create: resourceAwsRDSGlobalClusterCreate,
@@ -38,10 +42,11 @@ func resourceAwsRDSGlobalCluster() *schema.Resource {
Default: false,
},
"engine": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "aurora",
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"source_db_cluster_identifier"},
ValidateFunc: validation.StringInSlice([]string{
"aurora",
"aurora-mysql",
@@ -54,15 +59,43 @@
Computed: true,
ForceNew: true,
},
"force_destroy": {
Type: schema.TypeBool,
Optional: true,
},
"global_cluster_identifier": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"global_cluster_members": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"db_cluster_arn": {
Type: schema.TypeString,
Computed: true,
},
"is_writer": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},
"global_cluster_resource_id": {
Type: schema.TypeString,
Computed: true,
},
"source_db_cluster_identifier": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"engine"},
RequiredWith: []string{"force_destroy"},
},
"storage_encrypted": {
Type: schema.TypeBool,
Optional: true,
@@ -76,15 +109,17 @@ func resourceAwsRDSGlobalClusterCreate(d *schema.ResourceData, meta interface{})
conn := meta.(*AWSClient).rdsconn

input := &rds.CreateGlobalClusterInput{
DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)),
GlobalClusterIdentifier: aws.String(d.Get("global_cluster_identifier").(string)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
}

if v, ok := d.GetOk("database_name"); ok {
input.DatabaseName = aws.String(v.(string))
}

if v, ok := d.GetOk("deletion_protection"); ok {
input.DeletionProtection = aws.Bool(v.(bool))
}

if v, ok := d.GetOk("engine"); ok {
input.Engine = aws.String(v.(string))
}
@@ -93,7 +128,21 @@ func resourceAwsRDSGlobalClusterCreate(d *schema.ResourceData, meta interface{})
input.EngineVersion = aws.String(v.(string))
}

log.Printf("[DEBUG] Creating RDS Global Cluster: %s", input)
if v, ok := d.GetOk("source_db_cluster_identifier"); ok {
input.SourceDBClusterIdentifier = aws.String(v.(string))
}

if v, ok := d.GetOk("storage_encrypted"); ok {
input.StorageEncrypted = aws.Bool(v.(bool))
}

// Prevent the following error and keep the previous default,
// since we cannot have Engine default after adding SourceDBClusterIdentifier:
// InvalidParameterValue: When creating standalone global cluster, value for engineName should be specified
if input.Engine == nil && input.SourceDBClusterIdentifier == nil {
input.Engine = aws.String("aurora")
}

output, err := conn.CreateGlobalCluster(input)
if err != nil {
return fmt.Errorf("error creating RDS Global Cluster: %s", err)
@@ -141,6 +190,11 @@ func resourceAwsRDSGlobalClusterRead(d *schema.ResourceData, meta interface{}) e
d.Set("engine", globalCluster.Engine)
d.Set("engine_version", globalCluster.EngineVersion)
d.Set("global_cluster_identifier", globalCluster.GlobalClusterIdentifier)

if err := d.Set("global_cluster_members", flattenRdsGlobalClusterMembers(globalCluster.GlobalClusterMembers)); err != nil {
return fmt.Errorf("error setting global_cluster_members: %w", err)
}

d.Set("global_cluster_resource_id", globalCluster.GlobalClusterResourceId)
d.Set("storage_encrypted", globalCluster.StorageEncrypted)

@@ -176,6 +230,35 @@ func resourceAwsRDSGlobalClusterUpdate(d *schema.ResourceData, meta interface{})
func resourceAwsRDSGlobalClusterDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn

if d.Get("force_destroy").(bool) {
for _, globalClusterMemberRaw := range d.Get("global_cluster_members").(*schema.Set).List() {
globalClusterMember, ok := globalClusterMemberRaw.(map[string]interface{})

if !ok {
continue
}

dbClusterArn, ok := globalClusterMember["db_cluster_arn"].(string)

if !ok {
continue
}

input := &rds.RemoveFromGlobalClusterInput{
DbClusterIdentifier: aws.String(dbClusterArn),
GlobalClusterIdentifier: aws.String(d.Id()),
}

if _, err := conn.RemoveFromGlobalCluster(input); err != nil {
return fmt.Errorf("error removing RDS DB Cluster (%s) from Global Cluster (%s): %w", dbClusterArn, d.Id(), err)
}

if err := waitForRdsGlobalClusterRemoval(conn, dbClusterArn); err != nil {
return fmt.Errorf("error waiting for RDS DB Cluster (%s) removal from RDS Global Cluster (%s): %w", dbClusterArn, d.Id(), err)
}
}
}

input := &rds.DeleteGlobalClusterInput{
GlobalClusterIdentifier: aws.String(d.Id()),
}
@@ -217,6 +300,25 @@ func resourceAwsRDSGlobalClusterDelete(d *schema.ResourceData, meta interface{})
return nil
}

// flattenRdsGlobalClusterMembers converts the API GlobalClusterMember objects
// into the map structure stored in the global_cluster_members attribute.
func flattenRdsGlobalClusterMembers(apiObjects []*rds.GlobalClusterMember) []interface{} {
if len(apiObjects) == 0 {
return nil
}

var tfList []interface{}

for _, apiObject := range apiObjects {
tfMap := map[string]interface{}{
"db_cluster_arn": aws.StringValue(apiObject.DBClusterArn),
"is_writer": aws.BoolValue(apiObject.IsWriter),
}

tfList = append(tfList, tfMap)
}

return tfList
}

func rdsDescribeGlobalCluster(conn *rds.RDS, globalClusterID string) (*rds.GlobalCluster, error) {
var globalCluster *rds.GlobalCluster

@@ -353,3 +455,38 @@ func waitForRdsGlobalClusterDeletion(conn *rds.RDS, globalClusterID string) erro

return err
}

// waitForRdsGlobalClusterRemoval polls until the given DB Cluster is no longer
// reported as a member of any Global Cluster, or rdsGlobalClusterRemovalTimeout elapses.
func waitForRdsGlobalClusterRemoval(conn *rds.RDS, dbClusterIdentifier string) error {
var globalCluster *rds.GlobalCluster
stillExistsErr := fmt.Errorf("RDS DB Cluster still exists in RDS Global Cluster")

err := resource.Retry(rdsGlobalClusterRemovalTimeout, func() *resource.RetryError {
var err error

globalCluster, err = rdsDescribeGlobalClusterFromDbClusterARN(conn, dbClusterIdentifier)

if err != nil {
return resource.NonRetryableError(err)
}

if globalCluster != nil {
return resource.RetryableError(stillExistsErr)
}

return nil
})

if isResourceTimeoutError(err) {
_, err = rdsDescribeGlobalClusterFromDbClusterARN(conn, dbClusterIdentifier)
}

if err != nil {
return err
}

if globalCluster != nil {
return stillExistsErr
}

return nil
}
52 changes: 52 additions & 0 deletions aws/resource_aws_rds_global_cluster_test.go
@@ -296,6 +296,34 @@ func TestAccAWSRdsGlobalCluster_EngineVersion_AuroraPostgresql(t *testing.T) {
})
}

func TestAccAWSRdsGlobalCluster_SourceDbClusterIdentifier(t *testing.T) {
var globalCluster1 rds.GlobalCluster
rName := acctest.RandomWithPrefix("tf-acc-test")
clusterResourceName := "aws_rds_cluster.test"
resourceName := "aws_rds_global_cluster.test"

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSRdsGlobalCluster(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSRdsGlobalClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSRdsGlobalClusterConfigSourceDbClusterIdentifier(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRdsGlobalClusterExists(resourceName, &globalCluster1),
resource.TestCheckResourceAttrPair(resourceName, "source_db_cluster_identifier", clusterResourceName, "arn"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"force_destroy", "source_db_cluster_identifier"},
},
},
})
}

func TestAccAWSRdsGlobalCluster_StorageEncrypted(t *testing.T) {
var globalCluster1, globalCluster2 rds.GlobalCluster
rName := acctest.RandomWithPrefix("tf-acc-test")
@@ -490,6 +518,30 @@ resource "aws_rds_global_cluster" "test" {
`, engine, engineVersion, rName)
}

func testAccAWSRdsGlobalClusterConfigSourceDbClusterIdentifier(rName string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
cluster_identifier = %[1]q
engine = "aurora-postgresql"
engine_version = "10.11" # Minimum supported version for Global Clusters
master_password = "mustbeeightcharacters"
master_username = "test"
skip_final_snapshot = true

# global_cluster_identifier cannot be Computed
lifecycle {
ignore_changes = [global_cluster_identifier]
}
}

resource "aws_rds_global_cluster" "test" {
force_destroy = true
global_cluster_identifier = %[1]q
source_db_cluster_identifier = aws_rds_cluster.test.arn
}
`, rName)
}

func testAccAWSRdsGlobalClusterConfigStorageEncrypted(rName string, storageEncrypted bool) string {
return fmt.Sprintf(`
resource "aws_rds_global_cluster" "test" {
47 changes: 46 additions & 1 deletion website/docs/r/rds_global_cluster.html.markdown
@@ -14,6 +14,8 @@ More information about Aurora global databases can be found in the [Aurora User

## Example Usage

### New Global Cluster

```hcl
provider "aws" {
alias = "primary"
@@ -63,23 +65,51 @@ resource "aws_rds_cluster_instance" "secondary" {
}
```

### New Global Cluster From Existing DB Cluster

```hcl
resource "aws_rds_cluster" "example" {
# ... other configuration ...

# NOTE: When this DB Cluster is used to create a Global Cluster, its
# global_cluster_identifier attribute becomes populated and Terraform
# will begin showing it as a difference. Do not configure:
# global_cluster_identifier = aws_rds_global_cluster.example.id
# as it creates a circular reference. Use ignore_changes instead.
lifecycle {
ignore_changes = [global_cluster_identifier]
}
}

resource "aws_rds_global_cluster" "example" {
force_destroy = true
global_cluster_identifier = "example"
source_db_cluster_identifier = aws_rds_cluster.example.arn
}
```

## Argument Reference

The following arguments are supported:

* `global_cluster_identifier` - (Required, Forces new resources) The global cluster identifier.
* `database_name` - (Optional, Forces new resources) Name for an automatically created database on cluster creation.
* `deletion_protection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`.
* `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Valid values: `aurora`, `aurora-mysql`, `aurora-postgresql`. Defaults to `aurora`.
* `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Valid values: `aurora`, `aurora-mysql`, `aurora-postgresql`. Defaults to `aurora`. Conflicts with `source_db_cluster_identifier`. A minimal standalone sketch is shown after this list.
* `engine_version` - (Optional, Forces new resources) Engine version of the Aurora global database.
* **NOTE:** When the engine is set to `aurora-mysql`, an engine version compatible with Aurora Global Database is required. The earliest available version is `5.7.mysql_aurora.2.06.0`.
* `force_destroy` - (Optional) Enable to remove DB Cluster members from the Global Cluster on destroy. Required with `source_db_cluster_identifier`.
* `source_db_cluster_identifier` - (Optional) Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value.
* `storage_encrypted` - (Optional, Forces new resources) Specifies whether the DB cluster is encrypted. The default is `false`.
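
As a minimal sketch of a standalone configuration (the identifier and engine values below are illustrative, not prescribed by this resource):

```hcl
resource "aws_rds_global_cluster" "example" {
  global_cluster_identifier = "example"

  # Optional; when neither engine nor source_db_cluster_identifier is set,
  # the provider falls back to the previous "aurora" default.
  engine = "aurora-postgresql"
}
```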

## Attribute Reference

In addition to all arguments above, the following attributes are exported:

* `arn` - RDS Global Cluster Amazon Resource Name (ARN)
* `global_cluster_members` - Set of objects containing Global Cluster members (an illustrative output sketch follows this list).
* `db_cluster_arn` - Amazon Resource Name (ARN) of member DB Cluster
* `is_writer` - Whether the member is the primary DB Cluster
* `global_cluster_resource_id` - AWS Region-unique, immutable identifier for the global database cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB cluster is accessed
* `id` - RDS Global Cluster identifier
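
As an illustrative sketch (the resource address `aws_rds_global_cluster.example` and the output names are assumptions, not part of this resource), the exported set can be referenced from outputs:

```hcl
# All member DB Cluster ARNs of the Global Cluster.
output "global_cluster_member_arns" {
  value = [for member in aws_rds_global_cluster.example.global_cluster_members : member.db_cluster_arn]
}

# Only the writer (primary) DB Cluster ARN; an Aurora global database has exactly one writer.
output "writer_db_cluster_arns" {
  value = [for member in aws_rds_global_cluster.example.global_cluster_members : member.db_cluster_arn if member.is_writer]
}
```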

@@ -90,3 +120,18 @@ In addition to all arguments above, the following attributes are exported:
```
$ terraform import aws_rds_global_cluster.example example
```

Certain resource arguments, like `force_destroy`, only exist within Terraform. If the argument is set in the Terraform configuration on an imported resource, Terraform will show a difference on the first plan after import to update the state value. This change is safe to apply immediately so the state matches the desired configuration.
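
For example, a sketch of such a configuration after import (the identifier is illustrative); the first plan only records `force_destroy` in state:

```hcl
resource "aws_rds_global_cluster" "example" {
  global_cluster_identifier = "example"

  # Terraform-only argument; the first plan after import shows an in-place
  # update that simply stores this value in state.
  force_destroy = true
}
```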

Certain resource arguments, like `source_db_cluster_identifier`, do not have an API method for reading the information after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g.

```hcl
resource "aws_rds_global_cluster" "example" {
# ... other configuration ...

# There is no API for reading source_db_cluster_identifier
lifecycle {
ignore_changes = [source_db_cluster_identifier]
}
}
```